parquet-converter committed
Commit fcca8bc · 1 Parent(s): 052850c

Update parquet files (step 95 of 249)

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete set of changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Create Realistic and Immersive Environments with World Creator 2 - Download for Free.md +0 -45
  2. spaces/1gistliPinn/ChatGPT4/Examples/Blackfridaybookhussainzaidipdffreedownload.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Blu Hey Bro 1080p Telugu Movies The Best Comedy of the Year.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Buzzsaw 2011 32 Bit Keygen Free The Easiest Way to Install and Activate Buzzsaw.md +0 -8
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Barbie Dreamhouse Adventures APK MOD VIP Unlocked 2022.md +0 -119
  6. spaces/1phancelerku/anime-remove-background/Enjoy Brawlhalla on Your Mobile Device with the 32bit APK File.md +0 -111
  7. spaces/1phancelerku/anime-remove-background/FNAF 4 Download Free The Ultimate Guide to Install and Play the Game.md +0 -132
  8. spaces/2023Liu2023/bingo/src/components/user-menu.tsx +0 -113
  9. spaces/2ndelement/voicevox/voicevox_engine/__init__.py +0 -1
  10. spaces/AIFILMS/ControlNet-Video/README.md +0 -14
  11. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/__init__.py +0 -0
  12. spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/conformer/layers.py +0 -260
  13. spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/updater/__init__.py +0 -9
  14. spaces/Ahmadjaved/Genaispeech/README.md +0 -12
  15. spaces/AlexWang/lama/saicinpainting/training/trainers/base.py +0 -291
  16. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/__init__.py +0 -13
  17. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py +0 -172
  18. spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py +0 -4
  19. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/gui/ui_draw.py +0 -189
  20. spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/blocks.py +0 -342
  21. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/config.py +0 -38
  22. spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/midas/midas_net_custom.py +0 -128
  23. spaces/Anthony7906/MengHuiMXD_GPT/chatgpt - macOS.command +0 -7
  24. spaces/AnthonyTruchetPoC/persistent-docker/src/apps/streamlit_demo.py +0 -62
  25. spaces/AnticPan/Clothes2Human/README.md +0 -13
  26. spaces/Anustup/NS_AI_LABS/src/segments.py +0 -55
  27. spaces/AquaSuisei/ChatGPTXE/modules/pdf_func.py +0 -180
  28. spaces/Ariharasudhan/YoloV5/utils/metrics.py +0 -363
  29. spaces/Arvi/Performance_predictor_and_feedback_generator/README.md +0 -12
  30. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/run_inference_tests.sh +0 -44
  31. spaces/Benson/text-generation/Examples/App Descargar Msica Mp3.md +0 -161
  32. spaces/BernardoOlisan/vqganclip/CLIP/clip/__init__.py +0 -1
  33. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/regexopt.py +0 -91
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/filepost.py +0 -98
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/tomli/_re.py +0 -107
  36. spaces/CVPR/LIVE/thrust/testing/unittest/util.h +0 -67
  37. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/core/triple_chevron_launch.h +0 -976
  38. spaces/ChallengeHub/Chinese-LangChain/assets/custom.css +0 -190
  39. spaces/ChandraMohanNayal/AutoGPT/tests/integration/memory_tests.py +0 -49
  40. spaces/Cletrason/Cletrason-toad-in-the-mario-movie/app.py +0 -3
  41. spaces/CodeDoes/FrostAura-gpt-neox-20b-fiction-novel-generation/app.py +0 -3
  42. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/diffusionmodules/upscaling.py +0 -81
  43. spaces/CyberPeace-Institute/Cybersecurity-Knowledge-Graph-Extraction/app.py +0 -103
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/B_A_S_E_.py +0 -5
  45. spaces/DQChoi/image_sticker/README.md +0 -12
  46. spaces/Dagfinn1962/prodia2/README.md +0 -14
  47. spaces/Datasculptor/StyleGAN-NADA/model/sg2_model.py +0 -817
  48. spaces/Detomo/ai-avatar-frontend/Dockerfile +0 -20
  49. spaces/DonDoesStuff/sd_xl_base_0.9/README.md +0 -19
  50. spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GetCode.py +0 -232
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Create Realistic and Immersive Environments with World Creator 2 - Download for Free.md DELETED
@@ -1,45 +0,0 @@
- <br />
- <h1>How to Download World Creator 2 for Free and Create Stunning Terrains</h1>
-
- <p>World Creator 2 is a powerful terrain and landscape generator that allows you to create realistic and immersive environments in real-time. Whether you are a game developer, a filmmaker, or an artist, World Creator 2 can help you bring your vision to life with its advanced features and tools.</p>
-
- <p>In this article, we will show you how to download World Creator 2 for free and how to use it to create amazing terrains using real-world data from MapTiler Cloud.</p>
- <h2>world creator 2 free download crack</h2><br /><p><b><b>Download File</b> &#10026;&#10026;&#10026; <a href="https://byltly.com/2uKuY0">https://byltly.com/2uKuY0</a></b></p><br /><br />
-
- <h2>What is World Creator 2?</h2>
-
- <p>World Creator 2 is the world's first real-time terrain and landscape generator that performs all its generation and design processes entirely on the GPU using thousands of cores. It offers a highly optimized and improved workflow with more tools and features than its predecessor, World Creator 1.</p>
-
- <p>Some of the key features of World Creator 2 are:</p>
-
- - Real-time terrain generation: You can create terrains from scratch by hand, use existing terrains to stamp your world, or combine both workflows to get what you want. There are no design limitations - everything is possible, and you are in complete control.
- - Outstanding procedural power: You can apply and combine many different kinds of filters to modify the terrain you created or imported from another source. You can erode, create rivers and lakes, apply sediments, transform, stylize, simulate water flow and sediment transport as well as sediment deposit, and much more entirely in real-time.
- - Powerful design capabilities: You can draw anything, anytime, anywhere on your terrain. You can create roads, rivers, lakes, plateaus, terraces, raise mountains, and more - or just draw the shape you want by hand or use custom height-maps and real-world data to stamp your terrain.
- - Real-world maps integration: You can use real-world 3D DEM data from MapTiler Cloud to create realistic terrains based on any location on Earth. MapTiler Cloud provides high-quality elevation data for the whole world that you can easily import into World Creator 2.
-
- <h2>How to Download World Creator 2 for Free?</h2>
-
- <p>World Creator 2 is a commercial software that requires a license to use. However, there is a way to download World Creator 2 for free and use it without any limitations. Here are the steps:</p>
-
- - Go to the official website of World Creator 2: https://www.world-creator.com/
- - Click on the "Download" button at the top right corner of the page.
- - Choose the version that suits your operating system (Windows or Mac) and click on the "Download" button again.
- - You will be redirected to a page where you can enter your email address and name to receive a download link.
- - Check your email inbox for an email from World Creator with the subject "World Creator Download Link".
- - Click on the link in the email to start downloading World Creator 2.
- - Once the download is complete, unzip the file and run the installer.
- - Follow the instructions on the screen to install World Creator 2 on your computer.
- - When the installation is finished, launch World Creator 2 from your desktop or start menu.
- - You will be asked to enter a license key or activate a trial version. Choose the trial version option and click on "Activate".
- - You will be able to use World Creator 2 for free for 30 days with all its features unlocked.
-
- <h2>How to Use World Creator 2 to Create Stunning Terrains?</h2>
-
- <p>Now that you have downloaded World Creator 2 for free, you can start creating amazing terrains with it. Here are some basic steps to get you started:</p>
-
- - Open World Creator 2 and choose a project template or create a new project from scratch.
- - In the project settings panel, you can adjust various parameters such as terrain size, resolution, seed, biome type, etc.
- - In the terrain editor panel, you can use different tools and filters to sculpt and modify your terrain. You can also import height-maps or real-world data from MapTiler Cloud to stamp your terrain with realistic features.
- - In the texture editor panel, you can apply different materials and textures to your terrain. You can also blend multiple textures using</p> ddb901b051<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Blackfridaybookhussainzaidipdffreedownload.md DELETED
@@ -1,6 +0,0 @@
- <h2>blackfridaybookhussainzaidipdffreedownload</h2><br /><p><b><b>Download Zip</b> &#10004; <a href="https://imgfil.com/2uxZvh">https://imgfil.com/2uxZvh</a></b></p><br /><br />
- <br />
- Black Friday Book Hussain Zaidi Pdf Free 33 - Yola Black Friday The True Story Of Bombay Bomb Blasts S ... How to Read a Bomb: Scenes ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Blu Hey Bro 1080p Telugu Movies The Best Comedy of the Year.md DELETED
@@ -1,6 +0,0 @@
- <h2>Blu Hey Bro 1080p Telugu Movies</h2><br /><p><b><b>Download Zip</b> &#187; <a href="https://imgfil.com/2uxYLo">https://imgfil.com/2uxYLo</a></b></p><br /><br />
-
- aaccfb2cb3<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Buzzsaw 2011 32 Bit Keygen Free The Easiest Way to Install and Activate Buzzsaw.md DELETED
@@ -1,8 +0,0 @@
- <br />
- <p>Since 2011 winter season, frost depths have been measured as an outreach program in Hokkaido, northern part of Japan, where seasonal ground freezing occurs in winter. Frost depths were measured in elementary, junior high and high schools in order to emphasis their interest for earth sciences. At schools, using simple frost tube, measurements were conducted directly once a week by students or teacher during ground freezing under no snow-removal condition. A lecture was made in class and a frost tube was set at schoolyard, as the same tube and protocol as UAF's Permafrost Outreach Program, using clear tube with blue-colored water. In 2011 winter season, we started measurements at three schools, and the number of school extended to 32 in 2016 season, 26 elementary schools, 5 junior high schools and one high school. We visited schools in summer time or just before frost season to talk about the method of measurement, and measurements by students started just after ground freezing. After the end of frozen period, we visited schools again to explain results of each school or another schools in Japan, Alaska, Canada or Russia. The measured frost depths in Hokkaido ranged widely, from only a few centimeter to more than 50 cm. However, some schools had no frost depth due to heavy snow. We confirmed that the frost depth strongly depends on air temperature and snow depth. The lecture was made to student why the frost depth ranged widely, and the effect of snow was explained by using the example of igloo. In order to validate the effect of snow and to compare frost depths, we tried to measure frost depths under snow-removal and no snow-removal conditions at the same elementary school. At the end of December, depths had no significant difference between these conditions, and the difference went to 14 cm after one month, with about 30 cm of snow depth. After these measurements and lectures, students noticed snow has a role as insulator and affects the frost depth.</p>
- <p>In order to emphasis their interest for earth sciences, an outreach program through measurements of frost depth is conducting in Japan since 2011. This program is made at elementary, junior high and high schools in Hokkaido, northern part of Japan where seasonal ground freezing occurs in winter. At schools, a lecture was made and a frost tube was set at schoolyard, as the same tube and protocol as UAF's Permafrost Outreach Program, using clear tube with blue-colored water. Frost depth was measured directly once a week at each school by students during ground freezing under no snow-removal condition. In 2011 season, we started this program at three schools, and the number of participated school is extended to 29 schools in 2014 winter season, 23 elementary schools, 5 junior high schools and one high school. We visited schools summer time and just before frost season to talk about the method of measurement. After the end of measured period, we also visited schools to explain measured results by each school and the other schools in Japan, Alaska, Canada and Russia. The measured values of frost depth in Hokkaido were ranged between 0cm and more than 50cm. We found that the frost depth depends on air temperature and snow depth. We discussed with student why the frost depth ranged widely and explained the effect of snow by using the example of igloo. In order to validate the effect of snow and to compare frost depths, we tried to measure frost depths under snow-removal and no snow-removal conditions at one elementary school. At the end of December, depths had no significant difference between these conditions, 11cm and 10cm, and the difference went to 14cm, 27cm and 13cm after one month, with about 30cm of snow depth. After these measurements and lectures, students noticed snow has a role as insulator and affects the frost depth. The network of this program will be expected to expand, finally more than a hundred schools.</p>
- <h2>Buzzsaw 2011 32 Bit Keygen Free</h2><br /><p><b><b>DOWNLOAD</b> &#9989; <a href="https://imgfil.com/2uxX3j">https://imgfil.com/2uxX3j</a></b></p><br /><br />
- <p>Spring frost can be a limiting factor in sweet cherry ( Prunus avium L.) production. Rising temperatures in spring force the development of buds, whereby their vulnerability to freezing temperatures continuously increases. With the beginning of blossom, flowers can resist only light frosts without any significant damage. In this study, we investigated the risk of spring frost damages during cherry blossom for historical and future climate conditions at two different sites in NE (Berlin) and SW Germany (Geisenheim). Two phenological models, developed on the basis of phenological observations at the experimental sweet cherry orchard in Berlin-Dahlem and validated for endodormancy release and for warmer climate conditions (already published), were used to calculate the beginning of cherry blossom in Geisenheim, 1951-2015 (external model validation). Afterwards, on the basis of a statistical regionalisation model WETTREG (RCP 8.5), the frequency of frost during cherry blossom was calculated at both sites for historical (1971-2000) and future climate conditions (2011-2100). From these data, we derived the final flower damage, defined as the percentage of frozen flowers due to single or multiple frost events during blossom. The results showed that rising temperatures in this century can premature the beginning of cherry blossom up to 17 days at both sites, independent of the used phenological model. The frequency and strength of frost was characterised by a high temporal and local variability. For both sites, no significant increase in frost frequency and frost damage during blossom was found. In Geisenheim, frost damages significantly decreased from the middle of the twenty-first century. This study additionally emphasises the importance of reliable phenological models which not only work for current but also for changed climate conditions and at different sites. The date of endodormancy release should always be a known parameter in chilling/forcing models.</p>
- <p>Spring frost can be a limiting factor in sweet cherry (Prunus avium L.) production. Rising temperatures in spring force the development of buds, whereby their vulnerability to freezing temperatures continuously increases. With the beginning of blossom, flowers can resist only light frosts without any significant damage. In this study, we investigated the risk of spring frost damages during cherry blossom for historical and future climate conditions at two different sites in NE (Berlin) and SW Germany (Geisenheim). Two phenological models, developed on the basis of phenological observations at the experimental sweet cherry orchard in Berlin-Dahlem and validated for endodormancy release and for warmer climate conditions (already published), were used to calculate the beginning of cherry blossom in Geisenheim, 1951-2015 (external model validation). Afterwards, on the basis of a statistical regionalisation model WETTREG (RCP 8.5), the frequency of frost during cherry blossom was calculated at both sites for historical (1971-2000) and future climate conditions (2011-2100). From these data, we derived the final flower damage, defined as the percentage of frozen flowers due to single or multiple frost events during blossom. The results showed that rising temperatures in this century can premature the beginning of cherry blossom up to 17 days at both sites, independent of the used phenological model. The frequency and strength of frost was characterised by a high temporal and local variability. For both sites, no significant increase in frost frequency and frost damage during blossom was found. In Geisenheim, frost damages significantly decreased from the middle of the twenty-first century. This study additionally emphasises the importance of reliable phenological models which not only work for current but also for changed climate conditions and at different sites. The date of endodormancy release should always be a known parameter in chilling/forcing models.</p> aaccfb2cb3<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Barbie Dreamhouse Adventures APK MOD VIP Unlocked 2022.md DELETED
@@ -1,119 +0,0 @@
- <br />
- <h1>Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK: How to Download and Play</h1>
- <p>If you are a fan of Barbie and her fabulous lifestyle, you might want to try out Barbie Dreamhouse Adventures, a fun simulation game for girls. In this game, you can create your own dreamhouse, design your own fashion, and join Barbie and her friends in various adventures. However, if you want to enjoy all the features and items in the game, you might need to spend some real money or watch ads. That's why some people prefer to use Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK, a modified version of the game that gives you everything for free. In this article, we will show you how to download and play this APK on your Android device.</p>
- <h2>barbie dreamhouse adventures tudo desbloqueado 2022 apk</h2><br /><p><b><b>Download</b> &#10031;&#10031;&#10031; <a href="https://urlin.us/2uT0o0">https://urlin.us/2uT0o0</a></b></p><br /><br />
- <h2>What is Barbie Dreamhouse Adventures?</h2>
- <h3>A fun simulation game for girls</h3>
- <p>Barbie Dreamhouse Adventures is a game developed by Budge Studios, a company that specializes in creating games for kids. The game is based on the popular animated series of the same name, which follows Barbie and her friends as they live in a glamorous dreamhouse. The game allows you to create your own dreamhouse, decorate it with furniture and accessories, and explore different rooms. You can also dress up Barbie and her friends with hundreds of outfits, hairstyles, and accessories. You can even design your own fashion and share it with other players.</p>
- <h3>Features of the game</h3>
- <p>Some of the features of Barbie Dreamhouse Adventures are:</p>
- <ul>
- <li>You can customize your dreamhouse with wallpapers, furniture, decorations, and more.</li>
- <li>You can join Barbie and her friends in various activities, such as baking, dancing, pool parties, pet care, and more.</li>
- <li>You can unlock new items and characters as you progress in the game.</li>
- <li>You can interact with other players and visit their dreamhouses.</li>
- <li>You can watch episodes from the animated series and get inspired by them.</li>
- </ul>
- <h2>What is Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK?</h2>
- <h3>A modified version of the game with everything unlocked</h3>
- <p>Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK is a modified version of the original game that gives you access to everything without paying or watching ads. This means that you can enjoy all the features and items in the game without any limitations. You can unlock all the rooms, furniture, outfits, accessories, characters, and activities in the game for free. You can also get unlimited coins and gems to buy anything you want.</p>
- <p>barbie dreamhouse adventures mod apk vip unlocked 2022<br />
- barbie dreamhouse adventures hack apk tudo liberado 2022<br />
- barbie dreamhouse adventures download apk full unlocked 2022<br />
- barbie dreamhouse adventures apk premium tudo gratis 2022<br />
- barbie dreamhouse adventures latest version apk tudo infinito 2022<br />
- barbie dreamhouse adventures free vip apk tudo ilimitado 2022<br />
- barbie dreamhouse adventures cracked apk tudo sem limites 2022<br />
- barbie dreamhouse adventures unlimited vip apk tudo de graça 2022<br />
- barbie dreamhouse adventures pro apk tudo completo 2022<br />
- barbie dreamhouse adventures modded apk tudo atualizado 2022<br />
- barbie dreamhouse adventures vip hack apk tudo funcionando 2022<br />
- barbie dreamhouse adventures unlocked apk tudo pronto 2022<br />
- barbie dreamhouse adventures premium mod apk tudo desbloqueado 2022<br />
- barbie dreamhouse adventures hack mod apk tudo liberado 2022<br />
- barbie dreamhouse adventures full unlocked apk tudo gratis 2022<br />
- barbie dreamhouse adventures latest mod apk tudo infinito 2022<br />
- barbie dreamhouse adventures free premium apk tudo ilimitado 2022<br />
- barbie dreamhouse adventures cracked mod apk tudo sem limites 2022<br />
- barbie dreamhouse adventures unlimited premium apk tudo de graça 2022<br />
- barbie dreamhouse adventures pro mod apk tudo completo 2022<br />
- barbie dreamhouse adventures modded vip apk tudo atualizado 2022<br />
- barbie dreamhouse adventures vip unlocked mod apk tudo funcionando 2022<br />
- barbie dreamhouse adventures unlocked vip apk tudo pronto 2022<br />
- barbie dreamhouse adventures vip mod apk download 2022<br />
- barbie dreamhouse adventures hack apk download 2022<br />
- barbie dreamhouse adventures download mod apk 2022<br />
- barbie dreamhouse adventures apk download vip unlocked 2022<br />
- barbie dreamhouse adventures premium apk download 2022<br />
- barbie dreamhouse adventures latest version apk download 2022<br />
- barbie dreamhouse adventures free vip apk download 2022<br />
- barbie dreamhouse adventures cracked apk download 2022<br />
- barbie dreamhouse adventures unlimited vip apk download 2022<br />
- barbie dreamhouse adventures pro apk download 2022<br />
- barbie dreamhouse adventures modded apk download 2022<br />
- barbie dreamhouse adventures vip hack apk download 2022<br />
- barbie dreamhouse adventures unlocked apk download 2022<br />
- how to get vip unlocked in barbie dreamhouse adventures apk 2022<br />
- how to hack barbie dreamhouse adventures apk tudo liberado 2022<br />
- how to download barbie dreamhouse adventures full unlocked apk 2022<br />
- how to get premium for free in barbie dreamhouse adventures apk 2022<br />
- how to update barbie dreamhouse adventures to latest version apk 2022<br />
- how to get free vip in barbie dreamhouse adventures hack apk 2022<br />
- how to crack barbie dreamhouse adventures vip unlocked apk 2022<br />
- how to get unlimited vip in barbie dreamhouse adventures modded apk 2022<br />
- how to install barbie dreamhouse adventures pro unlocked apk 2022<br />
- how to play barbie dreamhouse adventures with vip modded apk 2022<br />
- how to use vip features in barbie dreamhouse adventures hacked apk 2022<br />
- how to unlock everything in barbie dreamhouse adventures premium modded apk 2022</p>
- <h3>Benefits of using the APK</h3>
- <p>Some of the benefits of using Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK are:</p>
- <ul>
- <li>You can save your time and money by not having to watch ads or make in-app purchases.</li>
- <li>You can have more fun and creativity by having access to everything in the game.</li>
- <li>You can experience the full potential of the game without any restrictions.</li>
- <li>You can play offline without needing an internet connection.</li>
- </ul>
- <h2>How to download and install Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK?</h2>
- <h3>Steps to follow</h3>
- <p>To download and install Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK on your Android device, you need to follow these steps:</p> <p>- Step 1: Go to a trusted website that provides the APK file, such as [APKPure] or [APKCombo].</p>
- <p>- Step 2: Search for Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK and click on the download button.</p>
- <p>- Step 3: Wait for the download to finish and then open the APK file.</p>
- <p>- Step 4: If you see a warning message that says "Install unknown apps", you need to enable the option to allow installation from unknown sources. To do this, go to your device settings, then security, then unknown sources, and turn it on.</p>
- <p>- Step 5: After that, you can proceed with the installation by following the instructions on the screen.</p>
- <p>- Step 6: Once the installation is complete, you can launch the game and enjoy it.</p>
- <h3>Tips and warnings</h3>
- <p>Here are some tips and warnings that you should keep in mind when using Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK:</p>
- <ul>
- <li>Make sure that you download the APK file from a reliable and safe website. Avoid downloading from unknown or suspicious sources that might contain malware or viruses.</li>
- <li>Before installing the APK file, you should uninstall the original game if you have it on your device. This will prevent any conflicts or errors that might occur.</li>
- <li>Be careful not to update the game from the Google Play Store or any other source. This will overwrite the APK file and remove all the unlocked features and items.</li>
- <li>Backup your data before installing the APK file. This will help you restore your progress in case something goes wrong.</li>
- </ul>
- <h2>How to play Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK?</h2>
- <h3>Explore the dreamhouse and customize it</h3>
- <p>One of the main attractions of Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK is that you can explore and customize your own dreamhouse. You can choose from different rooms, such as the kitchen, the living room, the bedroom, the bathroom, and more. You can also decorate them with various wallpapers, furniture, decorations, and more. You can even change the color and style of each item. You can also unlock new rooms and items as you play. You can create your own dreamhouse according to your taste and imagination.</p>
- <h3>Join Barbie and her friends in various activities</h3>
- <p>Another fun aspect of Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK is that you can join Barbie and her friends in various activities. You can bake delicious cakes, dance to catchy music, have pool parties, take care of cute pets, and more. You can also dress up Barbie and her friends with hundreds of outfits, hairstyles, and accessories. You can even design your own fashion and share it with other players. You can also watch episodes from the animated series and get inspired by them. You can have a lot of fun and adventure with Barbie and her friends.</p>
- <h2>Conclusion</h2>
- <p>Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK is a great game for girls who love Barbie and her fabulous lifestyle. It allows you to create your own dreamhouse, design your own fashion, and join Barbie and her friends in various adventures. It also gives you access to everything in the game without paying or watching ads. You can download and install this APK on your Android device by following the steps and tips we have provided in this article. We hope you enjoy playing this game and have a wonderful time.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK:</p>
- <ul>
- <li><b>Q: Is Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK safe to use?</b></li>
- <li>A: Yes, as long as you download it from a trusted website that provides the original and unmodified APK file. However, you should always be careful when installing apps from unknown sources and scan them for any malware or viruses.</li>
- <li><b>Q: Is Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK compatible with my device?</b></li>
- <li>A: The APK file should work on most Android devices that support Android 4.4 or higher. However, some devices might have compatibility issues or performance problems depending on their specifications.</li>
- <li><b>Q: How can I contact the developer of Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK?</b></li>
- <li>A: The developer of this APK is not affiliated with Budge Studios, the official developer of Barbie Dreamhouse Adventures - The game. You can contact them through their website or email address, which you can find on the APK file or the website where you downloaded it.</li>
- <li><b>Q: Can I play Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK with my friends?</b></li>
- <li>A: Yes, you can play this game with your friends online. You can visit their dreamhouses, chat with them, and join them in activities. You can also invite them to your dreamhouse and show them your creations.</li>
- <li><b>Q: What are some alternatives to Barbie Dreamhouse Adventures Tudo Desbloqueado 2022 APK?</b></li>
- <li>A: If you are looking for other games that are similar to Barbie Dreamhouse Adventures, you might want to check out these games:</li>
- <ul>
- <li>Barbie Fashion Closet: A game where you can dress up Barbie and her friends with different outfits and accessories.</li>
- <li>Barbie Magical Fashion: A game where you can transform Barbie into a princess, a mermaid, a fairy, or a hero.</li>
- <li>Barbie Dreamtopia: A game where you can explore the magical worlds of Dreamtopia with Barbie and her sister Chelsea.</li>
- </ul>
- </ul></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Enjoy Brawlhalla on Your Mobile Device with the 32bit APK File.md DELETED
@@ -1,111 +0,0 @@
-
- <h1>Brawlhalla 32bit APK: How to Download and Play the Free Platform Fighting Game on Android</h1>
- <p>If you are looking for a fun and exciting fighting game that you can play on your mobile device, you should check out Brawlhalla. Brawlhalla is a free platform fighting game that supports up to 8 players online or local, with full cross-play across different platforms. You can choose from over 50 unique characters, each with their own weapons and abilities, and compete in various modes and maps. In this article, we will show you how to download and play Brawlhalla 32bit APK on your Android device.</p>
- <h2>What is Brawlhalla?</h2>
- <p>Brawlhalla is a game created and developed by Blue Mammoth Games and published by Ubisoft Entertainment. It was released in 2017 for PC, PS4, Xbox One, and Nintendo Switch, and in 2020 for iOS and Android. It has over 80 million players worldwide and is one of the most popular fighting games on Steam.</p>
- <h2>brawlhalla 32bit apk</h2><br /><p><b><b>Download File</b> &#10003;&#10003;&#10003; <a href="https://jinyurl.com/2uNUlx">https://jinyurl.com/2uNUlx</a></b></p><br /><br />
- <h3>A brief introduction to the game's features, modes, and characters</h3>
- <p>Brawlhalla features simple controls and one-button special moves that make it easy for anyone to pick up and play. You can also customize your controls and settings according to your preference. The game has many features that make it fun and engaging, such as:</p>
- <ul>
- <li>Online Ranked 1v1 & 2v2 - Climb the ranked ladder from Tin up to Platinum and beyond by fighting against players near your skill level.</li>
- <li>4 Player Online Free for All - Casual matches where four fighters enter, but only one can win.</li>
- <li>Cross-play Custom Rooms - Invite up to 8 friends on all platforms to a huge variety of custom matches, such as 4v4s, 1v3, 2v2, FFA, and more.</li>
- <li>Many Game Modes - Mix things up with Brawlball, Bombsketball, Capture the Flag, Kung-Foot, and many more fun party game modes.</li>
- <li>The Training Room - Practice combos and setups inside the Training Room. Look at detailed frame data, hitboxes, hurtboxes, and sharpen your skills.</li>
- <li>Weekly Rotation - Every week, there is a new Legend Rotation of eight free characters that you can play. You can also earn gold to unlock more Legends by playing any online game mode.</li>
- <li>Battle Pass - Every season, there is a new Battle Pass that offers exclusive rewards such as skins, colors, avatars, emotes, sidekicks, KO effects, podiums, and more.</li>
- <li>Crossovers - Brawlhalla features crossover events with other popular franchises such as Adventure Time, WWE, Steven Universe, Ben 10, The Walking Dead, Tomb Raider, Hellboy, Shovel Knight, Rayman, and more.</li>
- </ul>
- <p>Brawlhalla has a diverse roster of over 50 Legends that you can choose from. Each Legend has their own stats (Strength, Dexterity, Defense, Speed), two weapons (Sword, Hammer, Spear, Axe, Rocket Lance, Katars, Blaster, Bow, Gauntlets, Scythe, Cannon, Orb, Greatsword), <h2>How to Download Brawlhalla 32bit APK on Android</h2>
- <p>Brawlhalla is available for free on the Google Play Store for Android devices. However, some older devices may not support the game or run it smoothly. If you have a 32-bit Android device, you may need to download the Brawlhalla 32bit APK file from a trusted source and install it manually. Here are the steps to do that:</p>
- <ol>
- <li>Go to a reputable website that offers APK files, such as APKPure, APKMirror, or Uptodown. Search for Brawlhalla and download the latest version of the 32bit APK file.</li>
- <li>Before installing the APK file, you need to enable the installation of apps from unknown sources on your device. To do that, go to Settings > Security > Unknown Sources and toggle it on.</li>
- <li>Locate the downloaded APK file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.</li>
- <li>Once the installation is done, you can launch Brawlhalla from your app drawer and enjoy the game.</li>
- </ol>
- <p>Note: Make sure you have enough storage space on your device before downloading and installing the APK file. Also, be careful when downloading APK files from third-party sources, as some of them may contain malware or viruses. Always scan the files with a reliable antivirus app before installing them.</p>
- <h2>How to Play Brawlhalla on Android</h2>
- <p>Brawlhalla is a game that requires quick reflexes, strategic thinking, and skillful execution. Whether you are playing online or offline, you need to know how to control your character and use your weapons effectively. Here are some basic tips and tricks to help you play Brawlhalla on Android:</p>
- <h3>The basic controls and mechanics of the game</h3>
- <p>Brawlhalla has a simple and intuitive control scheme that you can customize according to your preference. You can also choose between different control modes, such as touch screen, virtual joystick, or external controller. The default touch screen controls are as follows:</p>
- <ul>
- <li>Tap anywhere on the left side of the screen to move your character left or right.</li>
- <li>Swipe up or down on the left side of the screen to jump or drop down from a platform.</li>
- <li>Tap on the right side of the screen to perform a light attack with your weapon.</li>
- <li>Swipe in any direction on the right side of the screen to perform a heavy attack or a signature move with your weapon.</li>
- <li>Tap on the weapon icon on the bottom right corner of the screen to pick up or throw a weapon.</li>
- <li>Tap on the dodge icon on the bottom left corner of the screen to dodge an incoming attack or perform a recovery move in mid-air.</li>
- </ul>
- <p>The basic mechanics of Brawlhalla are similar to other platform fighting games, such as Super Smash Bros. The goal is to knock out your opponents by dealing enough damage to them and sending them flying off the stage. You can see how much damage you have taken by looking at your character's color and percentage. The more damage you take, the redder your character becomes and the higher your percentage goes. The higher your percentage, the farther you fly when hit by an attack.</p>
- <p>brawlhalla android game free download<br />
- brawlhalla cross-play platform fighting<br />
- brawlhalla 50 unique characters<br />
- brawlhalla online ranked matches<br />
- brawlhalla custom rooms with friends<br />
- brawlhalla fun party game modes<br />
- brawlhalla training room combos<br />
- brawlhalla best-in-class spectating<br />
- brawlhalla match recording and replay<br />
- brawlhalla dozens of maps<br />
- brawlhalla single player tournament mode<br />
- brawlhalla online brawl-of-the-week<br />
- brawlhalla experimental mode<br />
- brawlhalla regional servers for low-latency<br />
- brawlhalla frequent updates and events<br />
- brawlhalla career history and rewards<br />
- brawlhalla ranked seasons and ladder<br />
- brawlhalla friendly devs and support<br />
- brawlhalla fair free-to-play model<br />
- brawlhalla all legends pack unlock<br />
- brawlhalla crossover characters and skins<br />
- brawlhalla valhalla legends and lore<br />
- brawlhalla smash and fight among legends<br />
- brawlhalla free-for-all casual matches<br />
- brawlhalla 1v1 and 2v2 competitive modes<br />
- brawlhalla 4v4 and 8 player battles<br />
- brawlhalla kung-foot and bombsketball modes<br />
- brawlhalla capture the flag and brawlball modes<br />
- brawlhalla frame data and hitboxes<br />
- brawlhalla hurtboxes and setups<br />
- brawlhalla esports tournaments and prizes<br />
- brawlhalla community colors and codes<br />
- brawlhalla mammoth coins and gold<br />
- brawlhalla battle pass and missions<br />
- brawlhalla patch notes and balance changes<br />
- brawlhalla tips and tricks for beginners<br />
- brawlhalla guides and tutorials for advanced players<br />
- brawlhalla best legends and weapons for each mode<br />
- brawlhalla combos and strings for each weapon<br />
- brawlhalla sigs and stats for each legend<br />
- brawlhalla taunts and emotes for each legend<br />
- brawlhalla podiums and sidekicks for each legend<br />
- brawlhalla avatars and banners for each legend<br />
- brawlhalla KO effects and weapon skins for each legend<br />
- brawlhalla chest rotations and sales</p>
- <p>You can use different weapons and items to deal damage and knock out your opponents. Weapons spawn randomly on the stage and can be picked up by any player. Each weapon has its own moveset and signature moves that vary depending on which Legend you are using. Items such as bombs, mines, spike balls, and horns can also be thrown at your opponents to damage them or disrupt their movement.</p>
- <h3>The tips and tricks to improve your skills and win more matches</h3>
- <p>Brawlhalla is a game that rewards skill, practice, and creativity. There are many ways to improve your skills and win more matches, such as:</p>
- <ul>
- <li>Learn how to use each weapon and Legend effectively. Experiment with different combinations of weapons and Legends and find out what suits your playstyle best. You can also watch tutorials, guides, and gameplay videos from other players online to learn from them.</li>
- <li>Practice your combos and setups in the Training Room. You can use the Training Room to practice your moves, combos, setups, edgeguards, recoveries, and more. You can also adjust various settings such as gravity, damage, hitboxes, hurtboxes, frame data, etc., to help you analyze and improve your gameplay.</li>
- <ul>
- <li>Play online with other players of different skill levels. Playing online with other players is one of the best ways to improve your skills and learn from your mistakes. You can play online ranked matches to climb the ladder and earn rewards, or play online casual matches to have fun and experiment with different strategies. You can also join custom rooms with your friends or other players and play various game modes and settings.</li>
- <li>Watch replays of your matches and analyze your performance. You can watch replays of your matches and see what you did right and what you did wrong. You can also pause, rewind, fast-forward, and slow down the replay to see every detail of the match. You can use replays to identify your strengths and weaknesses, learn from your opponents, and improve your decision-making and execution.</li>
- <li>Have fun and enjoy the game. Brawlhalla is a game that is meant to be fun and enjoyable for everyone. Don't get too frustrated or angry if you lose or make mistakes. Instead, use them as opportunities to grow and improve. Don't be afraid to try new things and experiment with different weapons and Legends. Don't be too hard on yourself or others, and don't forget to have fun.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Brawlhalla is a free platform fighting game that you can play on your Android device with the Brawlhalla 32bit APK file. It is a game that is easy to learn but hard to master, with many features, modes, characters, and items to choose from. It is a game that is fun and exciting for both casual and competitive players, with full cross-play support across different platforms. If you are looking for a game that will keep you entertained for hours, you should definitely give Brawlhalla a try.</p>
- <h2>FAQs</h2>
- <h3>Is Brawlhalla free to play?</h3>
- <p>Yes, Brawlhalla is free to play on all platforms. You can download it from the Google Play Store for Android devices, or from the official website for PC, PS4, Xbox One, Nintendo Switch, iOS devices. You can also download the Brawlhalla 32bit APK file from a trusted source if you have a 32-bit Android device.</p>
- <h3>Is Brawlhalla safe to download?</h3>
- <p>Yes, Brawlhalla is safe to download from the official sources mentioned above. However, if you are downloading the Brawlhalla 32bit APK file from a third-party source, you should be careful and scan the file with a reliable antivirus app before installing it. Some APK files may contain malware or viruses that can harm your device or compromise your privacy.</p>
- <h3>How do I update Brawlhalla on Android?</h3>
- <p>If you have downloaded Brawlhalla from the Google Play Store, you can update it automatically or manually from there. If you have downloaded the Brawlhalla 32bit APK file from a third-party source, you will need to download the latest version of the APK file from the same source and install it over the existing one.</p>
- <h3>How do I get more gold in Brawlhalla?</h3>
- <p>You can get more gold in Brawlhalla by playing any online game mode, such as ranked, free for all, custom rooms, etc. You can also get more gold by completing daily missions and weekly challenges, or by leveling up your account or your Legends.</p>
- <h3>How do I get more skins in Brawlhalla?</h3>
- <p>You can get more skins in Brawlhalla by purchasing them with Mammoth Coins, which are the premium currency of the game. You can buy Mammoth Coins with real money through in-app purchases or through official partner websites. You can also get some skins for free by participating in events, promotions, giveaways, tournaments, etc.</p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/FNAF 4 Download Free The Ultimate Guide to Install and Play the Game.md DELETED
@@ -1,132 +0,0 @@
1
-
2
- <h1>How to Download and Play Five Nights at Freddy's 4 for Free</h1>
3
- <p>If you are a fan of horror games, you might have heard of Five Nights at Freddy's, a popular series of survival horror games that have terrified millions of players around the world. The fourth installment of the series, Five Nights at Freddy's 4, is arguably the most terrifying and challenging one yet. In this article, we will tell you what Five Nights at Freddy's 4 is, why you should play it, and how you can download and play it for free on your PC or mobile device.</p>
4
- <h2>What is Five Nights at Freddy's 4?</h2>
5
- <p>Five Nights at Freddy's 4, originally named Five Nights at Freddy's: The Final Chapter in development, is an indie point-and-click survival horror game developed and published by Scott Cawthon, and the fourth installment of the Five Nights at Freddy's series. The game is a prequel to Five Nights at Freddy's 2, and takes place in 1983, chronologically being the first game in the series.</p>
6
- <h2>fnaf 4 download free</h2><br /><p><b><b>Download Zip</b> &#10004;&#10004;&#10004; <a href="https://jinyurl.com/2uNU7g">https://jinyurl.com/2uNU7g</a></b></p><br /><br />
7
- <p>The game takes place in the bedroom of a child, where the player must avoid attack by nightmarish animatronics that stalk them. Instead of having a monitor to ward away the animatronics, the player must instead check the doors, closet, and the bed and utilize a flashlight to ward away any nightmare animatronics outside the room, relying on environmental noises to know if something is approaching or about to attack.</p>
8
- <h2>Why should you play Five Nights at Freddy's 4?</h2>
9
- <p>Five Nights at Freddy's 4 is a game that will test your nerves, reflexes, and patience. It is not a game for the faint-hearted, as it features some of the most terrifying jumpscares and sound effects in gaming history. The game also has a deep and mysterious lore that will keep you hooked and intrigued. The game has received mostly positive reviews from critics and players alike, praising its horror, suspense, and challenge.</p>
10
- <p>If you are looking for a game that will make you scream, sweat, and jump out of your seat, then Five Nights at Freddy's 4 is the game for you. It is a game that will make you feel like you are in a nightmare that you can't wake up from. It is a game that will make you question your sanity and reality. It is a game that will make you experience fear like never before.</p>
11
- <h2>How to download Five Nights at Freddy's 4 for free on PC?</h2>
12
- <p>If you want to play Five Nights at Freddy's 4 on your PC for free, you can use BlueStacks, an Android emulator that allows you to run Android apps and games on your PC. Here are the steps to download and play Five Nights at Freddy's 4 for free on PC using BlueStacks:</p>
13
- <ol>
14
- <li>Download BlueStacks from [here](^1^) and install it on your PC.</li>
15
- <li>Launch BlueStacks and sign in with your Google account.</li>
16
- <li>Search for Five Nights at Freddy's 4 in the Google Play Store app and install it.</li>
17
- <li>Open Five Nights at Freddy's 4 and enjoy playing it on your PC.</li>
18
- </ol>
19
- <p>You can also customize the settings, controls, and graphics of the game according to your preference using BlueStacks. You can also record and stream your gameplay using BlueStacks' built-in features.</p>
20
- <p>fnaf 4 full game free download<br />
21
- fnaf 4 pc download free<br />
22
- fnaf 4 android download free<br />
23
- fnaf 4 apk download free<br />
24
- fnaf 4 demo download free<br />
25
- fnaf 4 online free no download<br />
26
- fnaf 4 free download windows 10<br />
27
- fnaf 4 free download steam<br />
28
- fnaf 4 free download mac<br />
29
- fnaf 4 free download ios<br />
30
- fnaf 4 free download for laptop<br />
31
- fnaf 4 free download unblocked<br />
32
- fnaf 4 free download ocean of games<br />
33
- fnaf 4 free download mega<br />
34
- fnaf 4 free download mediafire<br />
35
- fnaf 4 free download softonic<br />
36
- fnaf 4 free download gamejolt<br />
37
- fnaf 4 free download uptodown<br />
38
- fnaf 4 free download apkpure<br />
39
- fnaf 4 free download android apk<br />
40
- fnaf 4 free download android no virus<br />
41
- fnaf 4 free download android full version<br />
42
- fnaf 4 free download android aptoide<br />
43
- fnaf 4 free download android mob.org<br />
44
- fnaf 4 free download android mod apk<br />
45
- fnaf 4 free download pc full version<br />
46
- fnaf 4 free download pc no virus<br />
47
- fnaf 4 free download pc windows 7<br />
48
- fnaf 4 free download pc windows xp<br />
49
- fnaf 4 free download pc rar<br />
50
- fnaf 4 free download pc zip file<br />
51
- fnaf 4 free download pc highly compressed<br />
52
- fnaf 4 free download pc crack<br />
53
- fnaf 4 free download pc setup.exe<br />
54
- fnaf 4 free download pc without steam<br />
55
- how to get fnaf 4 for free on pc<br />
56
- how to get fnaf 4 for free on android<br />
57
- how to get fnaf 4 for free on ios<br />
58
- how to get fnaf 4 for free on mac<br />
59
- how to get fnaf 4 for free on steam<br />
60
- how to play fnaf 4 for free online no download<br />
61
- how to play fnaf 4 for free on pc without downloading it<br />
62
- how to play fnaf 4 for free on android without downloading it<br />
63
- how to play fnaf 4 for free on ios without downloading it<br />
64
- how to play fnaf 4 for free on mac without downloading it</p>
65
- <h2>How to download Five Nights at Freddy's 4 for free on mobile?</h2>
66
- <p>If you want to play Five Nights at Freddy's 4 on your mobile device for free, you can use Google Play or App Store to download the game on your Android or iOS device. Here are the steps to download and play Five Nights at Freddy's 4 for free on mobile using Google Play or App Store:</p>
67
- <ol>
68
- <li>Open Google Play or App Store on your device and search for Five Nights at Freddy's 4.</li>
69
- <li>Tap on the game and install it on your device.</li>
70
- <li>Open Five Nights at Freddy's 4 and enjoy playing it on your mobile device.</li>
71
- </ol>
72
- <p>You can also adjust the settings, controls, and sound of the game according to your preference using the game's menu. You can also use headphones or earphones to enhance the immersion and horror of the game.</p>
73
- <h2>How to play Five Nights at Freddy's 4 effectively?</h2>
74
- <p>Five Nights at Freddy's 4 is a game that requires skill, strategy, and concentration. It is not a game that you can play casually or mindlessly. It is a game that will challenge you and make you think fast. Here are some gameplay tips and strategies to help you play Five Nights at Freddy's 4 effectively:</p>
75
- <ul>
76
- <li>Listen carefully to the sounds. The sounds are your main source of information in the game. You need to listen to the breathing, footsteps, laughter, and other noises that indicate the presence and location of the nightmare animatronics. If you hear breathing at the door, close it until you hear them leave. If you hear footsteps or laughter, flash your light at the door or closet to scare them away. If you hear nothing, check the bed or the closet for any plushies or animatronics.</li>
77
- <li>Use your flashlight wisely. Your flashlight is your only weapon in the game, but it also consumes power and attracts attention. You need to use it sparingly and strategically. You need to flash it at the door or closet to check for any animatronics or plushies, but only for a brief moment. If you flash it too long or too often, you will run out of power or attract more animatronics. You also need to avoid flashing it when you hear breathing, as that will trigger a jumpscare.</li>
78
- <li>Manage your time and power. The game lasts from 12 AM to 6 AM, which is equivalent to about 8 minutes in real time. You need to survive each night without running out of power or getting jumpscared by the animatronics. You need to balance your time and power between checking the doors, closet, bed, and hallway. You need to prioritize the most dangerous animatronics, such as Nightmare Fredbear and Nightmare, who can appear from any direction and require quick reactions.</li>
79
- </ul>
80
- <h2>Conclusion</h2>
81
- <p>Five Nights at Freddy's 4 is a game that will make you experience horror like never before. It is a game that will make you scream, sweat, and jump out of your seat. It is a game that will make you question your sanity and reality. It is a game that will make you feel like you are in a nightmare that you can't wake up from.</p>
82
- <p>If you are brave enough to face your fears, then download and play Five Nights at Freddy's 4 for free on your PC or mobile device today. You can use BlueStacks, Google Play, or App Store to download and play the game easily and conveniently. You can also use our gameplay tips and strategies to help you survive the nights and avoid the jumpscares.</p>
83
- <p>Are you ready to face the nightmare? Are you ready to play Five Nights at Freddy's 4?</p>
84
- <h2>FAQs</h2>
85
- <h3>What is the story of Five Nights at Freddy's 4?</h3>
86
- <p>The story of Five Nights at Freddy's 4 is told through minigames that occur between each night. The minigames reveal that the game takes place in 1983, and follows a young boy who is tormented by his older brother and his friends who wear masks of Freddy Fazbear and his friends. The boy is also terrified of Fredbear's Family Diner, a restaurant that features animatronic mascots that entertain children during the day. On his birthday, his brother and his friends force him to get close to Fredbear, who bites his head, causing the infamous Bite of '83. The boy is then hospitalized and suffers from nightmares of the animatronics, which are the gameplay segments of the game. The boy eventually dies from his injuries, and is comforted by a voice that tells him that he will put him back together.</p>
87
- <h3>Who are the nightmare animatronics in Five Nights at Freddy's 4?</h3>
88
- <p>The nightmare animatronics in Five Nights at Freddy's 4 are twisted and monstrous versions of the original animatronics from the previous games. They are the manifestations of the boy's fear and trauma, and they include:</p>
89
- <ul>
90
- <li>Nightmare Freddy: A dark brown bear with three smaller Freddles on his body. He can appear from the bed or the right hall.</li>
91
- <li>Nightmare Bonnie: A dark blue rabbit with sharp teeth and claws. He can appear from the left hall or the closet.</li>
92
- <li>Nightmare Chica: A dark yellow chicken with a cupcake on a plate. She can appear from the right hall or the closet.</li>
93
- <li>Nightmare Foxy: A dark red fox with a hook and an eye patch. He can appear from the closet or the left hall.</li>
94
- <li>Nightmare Fredbear: A golden bear with purple hat and bow tie. He can appear from any direction and replaces all other animatronics on Night 5 and 6.</li>
95
- <li>Nightmare: A black, semi-transparent version of Nightmare Fredbear with white eyes and teeth. He can appear from any direction and replaces Nightmare Fredbear on Nights 7 and 8.</li>
96
- <li>Plushtrap: A small green rabbit with a spring-loaded mechanism. He can appear in a separate minigame called Fun with Plushtrap, where the player must stop him on an X mark using a flashlight.</li>
97
- <li>Nightmarionne: A black and white puppet with long arms and legs. He can appear in the Halloween Edition of the game, where he replaces Plushtrap.</li>
98
- <li>Nightmare Mangle: A mangled version of Foxy with multiple heads and limbs. He can appear in the Halloween Edition of the game, where he replaces Nightmare Foxy.</li>
99
- <li>Jack-O-Bonnie: A dark orange rabbit with a jack-o-lantern head. He can appear in the Halloween Edition of the game, where he replaces Nightmare Bonnie.</li>
100
- <li>Jack-O-Chica: A dark orange chicken with a jack-o-lantern head and a pumpkin on a plate. She can appear in the Halloween Edition of the game, where she replaces Nightmare Chica.</li>
101
- </ul>
102
- <h3>What are the secrets and easter eggs in Five Nights at Freddy's 4?</h3>
103
- <p>Five Nights at Freddy's 4 is a game that is full of secrets and easter eggs that add to its lore and mystery. Some of the secrets and easter eggs in Five Nights at Freddy's 4 are:</p>
104
- <ul>
105
- <li>The clock ending: If the player collects four keys hidden in various minigames, they can unlock a secret ending in which they play as the Fredbear plushie and guide the crying child to a locked box said to contain "the pieces put together". The box never opens, leaving its contents unknown.</li>
106
- <li>The newspaper clippings: If the player looks closely at some of the newspapers on the walls of the minigames, they can see some references to previous games, such as "Fredbear's Family Diner to close after tragedy", "Local pizzeria threatened with shutdown over sanitation", and "Local pizzeria said to close by year's end".</li>
107
- <li>The IV drip, flowers, and pills: If the player looks closely at some of the objects in the bedroom, they can see an IV drip, flowers, and pills that appear and disappear randomly. These objects imply that the child is in a coma and is being treated in a hospital.</li>
108
- <li>The phone call: If the player listens carefully to the background noise of Night 1, they can hear a distorted version of the phone call from Five Nights at Freddy's 1, where Phone Guy mentions the Bite of '87. This suggests that the game is connected to the first game and that the Bite of '83 and the Bite of '87 are two separate incidents.</li>
109
- <li>The purple guy: If the player completes the Night 3 minigame, they can see a brief glimpse of a man in a purple uniform putting a Spring Bonnie suit on an employee. This man is implied to be William Afton, the main antagonist of the series and the killer of the children who possess the animatronics.</li>
110
- </ul>
111
- <h3>Is Five Nights at Freddy's 4 the last game in the series?</h3>
112
- <p>No, Five Nights at Freddy's 4 is not the last game in the series. Although it was originally intended to be the final chapter of the original story, Scott Cawthon later announced that he would continue to make more games in the series, as well as spin-offs, novels, and movies. Some of the games that have been released after Five Nights at Freddy's 4 are:</p>
113
- <ul>
114
- <li>Five Nights at Freddy's: Sister Location: A game that takes place in a sister location of Freddy Fazbear's Pizza, where the player must survive against new animatronics called Circus Baby, Ballora, Funtime Freddy, and Funtime Foxy.</li>
115
- <li>Freddy Fazbear's Pizzeria Simulator: A game that combines a tycoon simulator and a survival horror game, where the player must manage their own pizzeria and deal with salvaged animatronics that try to kill them.</li>
116
- <li>Ultimate Custom Night: A game that features 50 selectable animatronics from previous games, where the player can customize their difficulty and challenge themselves to survive as long as possible.</li>
117
- <li>Five Nights at Freddy's VR: Help Wanted: A game that features virtual reality versions of classic and original minigames set in the Five Nights at Freddy's universe.</li>
118
- <li>Five Nights at Freddy's AR: Special Delivery: A game that uses augmented reality to bring animatronics to life in the real world, where the player must collect, repair, and fight them.</li>
119
- <li>Five Nights at Freddy's: Security Breach: A game that is set to be released in late 2021, where the player will explore a new location called Freddy Fazbear's Mega Pizza Plex, and face new animatronics such as Glamrock Freddy, Glamrock Chica, Montgomery Gator, Roxanne Wolf, and Vanny.</li>
120
- </ul>
121
- <h3>Where can I find more information about Five Nights at Freddy's 4?</h3>
122
- <p>If you want to find more information about Five Nights at Freddy's 4, you can visit some of these websites:</p>
123
- <table>
124
- <tr><th>Website</th><th>Description</th></tr>
125
- <tr><td>[Five Nights at Freddy's Wiki]</td><td>A comprehensive wiki that contains information about the characters, locations, gameplay, lore, and secrets of Five Nights at Freddy's 4 and other games in the series.</td></tr>
126
- <tr><td>[Scott Games]</td><td>The official website of Scott Cawthon, the creator of Five Nights at Freddy's 4 and other games in the series. The website features teasers, updates, and announcements about his projects.</td></tr>
127
- <tr><td>[Steam]</td><td>The official store page of Five Nights at Freddy's 4 on Steam, where you can buy the game, read reviews, and join discussions.</td></tr>
128
- <tr><td>[YouTube]</td><td>A popular video-sharing platform where you can watch gameplay videos, trailers, theories, and reactions of Five Nights at Freddy's 4 and other games in the series.</td></tr>
129
- <tr><td>[Reddit]</td><td>A popular online community where you can join subreddits, such as r/fivenightsatfreddys, r/fnaf, and r/fnaf4, to share your thoughts, opinions, fan art, memes, and questions about Five Nights at Freddy's 4 and other games in the series.</td></tr>
130
- </table></p>
 
spaces/2023Liu2023/bingo/src/components/user-menu.tsx DELETED
@@ -1,113 +0,0 @@
1
- 'use client'
2
-
3
- import { useEffect, useState } from 'react'
4
- import Image from 'next/image'
5
- import { toast } from 'react-hot-toast'
6
- import { Button } from '@/components/ui/button'
7
- import pkg from '../../package.json'
8
- import {
9
- DropdownMenu,
10
- DropdownMenuContent,
11
- DropdownMenuItem,
12
- DropdownMenuSeparator,
13
- DropdownMenuTrigger
14
- } from '@/components/ui/dropdown-menu'
15
- import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons'
16
- import SettingIcon from '@/assets/images/settings.svg'
17
- import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard'
18
-
19
- export function UserMenu() {
20
- const [host, setHost] = useState('')
21
- const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 })
22
- useEffect(() => {
23
- setHost(location.host)
24
- }, [])
25
-
26
- useEffect(() => {
27
- if (isCopied) {
28
-       toast.success('复制成功') // "Copied successfully"
29
- }
30
- }, [isCopied])
31
- return (
32
- <div className="flex items-center justify-between">
33
- <DropdownMenu>
34
- <DropdownMenuTrigger asChild>
35
- <Button className="pl-0">
36
- <div className="flex items-center justify-center text-xs font-medium uppercase rounded-full select-none h-7 w-7 shrink-0 bg-muted/50 text-muted-foreground">
37
- <Image alt="settings" src={SettingIcon} width={20} />
38
- </div>
39
- <span className="ml-2">设置</span>
40
- </Button>
41
- </DropdownMenuTrigger>
42
- <DropdownMenuContent sideOffset={8} align="start" className="w-[180px] bg-background">
43
- <DropdownMenuItem
44
- onClick={() =>
45
- location.href='#dialog="settings"'
46
- }
47
- className="cursor-pointer"
48
- >
49
-             设置用户 {/* "User settings" */}
50
- </DropdownMenuItem>
51
- <DropdownMenuSeparator />
52
- <DropdownMenuItem
53
- onClick={() =>
54
- location.href='#dialog="voice"'
55
- }
56
- className="cursor-pointer"
57
- >
58
-             语音设置 {/* "Voice settings" */}
59
- </DropdownMenuItem>
60
- <DropdownMenuSeparator />
61
- <DropdownMenuItem asChild>
62
- <a
63
- href="https://github.com/weaigc/bingo/"
64
- target="_blank"
65
- rel="noopener noreferrer"
66
- className="inline-flex items-center justify-between w-full gap-2 cursor-pointer"
67
- >
68
-             开源地址 {/* "Source repository" */}
69
- <IconGitHub />
70
- <IconExternalLink className="w-3 h-3 ml-auto" />
71
- </a>
72
- </DropdownMenuItem>
73
- <DropdownMenuSeparator />
74
- <DropdownMenuItem asChild>
75
- <a
76
- href="https://huggingface.co/spaces/hf4all/bingo"
77
- target="_blank"
78
- rel="noopener noreferrer"
79
- className="inline-flex items-center justify-between w-full gap-2 cursor-pointer"
80
- >
81
-             托管地址 {/* "Hosted site" */}
82
- 🤗
83
- <IconExternalLink className="w-3 h-3 ml-auto" />
84
- </a>
85
- </DropdownMenuItem>
86
- <DropdownMenuSeparator />
87
- <DropdownMenuItem asChild>
88
- <a
89
- href="https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic"
90
- target="_blank"
91
- rel="noopener noreferrer"
92
- className="inline-flex items-center justify-between w-full gap-2 cursor-pointer"
93
- >
94
-             复制站点 {/* "Duplicate this site" */}
95
- <IconExternalLink className="w-3 h-3 ml-auto" />
96
- </a>
97
- </DropdownMenuItem>
98
- <DropdownMenuSeparator />
99
- <DropdownMenuItem className="flex-col items-start">
100
- <div className="font-medium">版本信息 {pkg.version}</div>
101
- </DropdownMenuItem>
102
- <DropdownMenuSeparator />
103
- <DropdownMenuItem className="flex-col items-start">
104
- <div className="font-medium">站点域名</div>
105
- <div onClick={() => copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer">
106
- {host} <IconCopy />
107
- </div>
108
- </DropdownMenuItem>
109
- </DropdownMenuContent>
110
- </DropdownMenu>
111
- </div>
112
- )
113
- }
 
spaces/2ndelement/voicevox/voicevox_engine/__init__.py DELETED
@@ -1 +0,0 @@
1
- __version__ = "latest"
 
 
spaces/AIFILMS/ControlNet-Video/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: ControlNet-Video
3
- emoji: 🕹
4
- colorFrom: pink
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.18.0
8
- python_version: 3.10.9
9
- app_file: app.py
10
- pinned: false
11
- duplicated_from: fffiloni/ControlNet-Video
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/__init__.py DELETED
File without changes
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/conformer/layers.py DELETED
@@ -1,260 +0,0 @@
1
- from torch import nn
2
- import torch
3
-
4
- from text_to_speech.modules.commons.layers import LayerNorm
5
-
6
-
7
- class ConvolutionModule(nn.Module):
8
- """ConvolutionModule in Conformer model.
9
- Args:
10
- channels (int): The number of channels of conv layers.
11
-         kernel_size (int): Kernel size of conv layers.
12
- """
13
-
14
- def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
15
- """Construct an ConvolutionModule object."""
16
- super(ConvolutionModule, self).__init__()
17
-         # kernel_size should be an odd number for 'SAME' padding
18
- assert (kernel_size - 1) % 2 == 0
19
-
20
- self.pointwise_conv1 = nn.Conv1d(
21
- channels,
22
- 2 * channels,
23
- kernel_size=1,
24
- stride=1,
25
- padding=0,
26
- bias=bias,
27
- )
28
- self.depthwise_conv = nn.Conv1d(
29
- channels,
30
- channels,
31
- kernel_size,
32
- stride=1,
33
- padding=(kernel_size - 1) // 2,
34
- groups=channels,
35
- bias=bias,
36
- )
37
- self.norm = nn.BatchNorm1d(channels)
38
- self.pointwise_conv2 = nn.Conv1d(
39
- channels,
40
- channels,
41
- kernel_size=1,
42
- stride=1,
43
- padding=0,
44
- bias=bias,
45
- )
46
- self.activation = activation
47
-
48
- def forward(self, x):
49
- """Compute convolution module.
50
- Args:
51
- x (torch.Tensor): Input tensor (#batch, time, channels).
52
- Returns:
53
- torch.Tensor: Output tensor (#batch, time, channels).
54
- """
55
- # exchange the temporal dimension and the feature dimension
56
- x = x.transpose(1, 2)
57
-
58
- # GLU mechanism
59
- x = self.pointwise_conv1(x) # (batch, 2*channel, dim)
60
- x = nn.functional.glu(x, dim=1) # (batch, channel, dim)
61
-
62
- # 1D Depthwise Conv
63
- x = self.depthwise_conv(x)
64
- x = self.activation(self.norm(x))
65
-
66
- x = self.pointwise_conv2(x)
67
-
68
- return x.transpose(1, 2)
69
-
70
-
71
- class MultiLayeredConv1d(torch.nn.Module):
72
- """Multi-layered conv1d for Transformer block.
73
-     This is a module of multi-layered conv1d designed
74
- to replace positionwise feed-forward network
75
-     in Transformer block, which is introduced in
76
- `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
77
- .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
78
- https://arxiv.org/pdf/1905.09263.pdf
79
- """
80
-
81
- def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
82
- """Initialize MultiLayeredConv1d module.
83
- Args:
84
- in_chans (int): Number of input channels.
85
- hidden_chans (int): Number of hidden channels.
86
- kernel_size (int): Kernel size of conv1d.
87
- dropout_rate (float): Dropout rate.
88
- """
89
- super(MultiLayeredConv1d, self).__init__()
90
- self.w_1 = torch.nn.Conv1d(
91
- in_chans,
92
- hidden_chans,
93
- kernel_size,
94
- stride=1,
95
- padding=(kernel_size - 1) // 2,
96
- )
97
- self.w_2 = torch.nn.Conv1d(
98
- hidden_chans,
99
- in_chans,
100
- kernel_size,
101
- stride=1,
102
- padding=(kernel_size - 1) // 2,
103
- )
104
- self.dropout = torch.nn.Dropout(dropout_rate)
105
-
106
- def forward(self, x):
107
- """Calculate forward propagation.
108
- Args:
109
- x (torch.Tensor): Batch of input tensors (B, T, in_chans).
110
- Returns:
111
-             torch.Tensor: Batch of output tensors (B, T, in_chans).
112
- """
113
- x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
114
- return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)
115
-
116
-
117
- class Swish(torch.nn.Module):
118
- """Construct an Swish object."""
119
-
120
- def forward(self, x):
121
- """Return Swich activation function."""
122
- return x * torch.sigmoid(x)
123
-
124
-
125
- class EncoderLayer(nn.Module):
126
- """Encoder layer module.
127
- Args:
128
- size (int): Input dimension.
129
- self_attn (torch.nn.Module): Self-attention module instance.
130
- `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance
131
- can be used as the argument.
132
- feed_forward (torch.nn.Module): Feed-forward module instance.
133
- `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
134
- can be used as the argument.
135
- feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.
136
- `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
137
- can be used as the argument.
138
- conv_module (torch.nn.Module): Convolution module instance.
139
-             `ConvolutionModule` instance can be used as the argument.
140
- dropout_rate (float): Dropout rate.
141
- normalize_before (bool): Whether to use layer_norm before the first block.
142
- concat_after (bool): Whether to concat attention layer's input and output.
143
- if True, additional linear will be applied.
144
- i.e. x -> x + linear(concat(x, att(x)))
145
- if False, no additional linear will be applied. i.e. x -> x + att(x)
146
- """
147
-
148
- def __init__(
149
- self,
150
- size,
151
- self_attn,
152
- feed_forward,
153
- feed_forward_macaron,
154
- conv_module,
155
- dropout_rate,
156
- normalize_before=True,
157
- concat_after=False,
158
- ):
159
- """Construct an EncoderLayer object."""
160
- super(EncoderLayer, self).__init__()
161
- self.self_attn = self_attn
162
- self.feed_forward = feed_forward
163
- self.feed_forward_macaron = feed_forward_macaron
164
- self.conv_module = conv_module
165
- self.norm_ff = LayerNorm(size) # for the FNN module
166
- self.norm_mha = LayerNorm(size) # for the MHA module
167
- if feed_forward_macaron is not None:
168
- self.norm_ff_macaron = LayerNorm(size)
169
- self.ff_scale = 0.5
170
- else:
171
- self.ff_scale = 1.0
172
- if self.conv_module is not None:
173
- self.norm_conv = LayerNorm(size) # for the CNN module
174
- self.norm_final = LayerNorm(size) # for the final output of the block
175
- self.dropout = nn.Dropout(dropout_rate)
176
- self.size = size
177
- self.normalize_before = normalize_before
178
- self.concat_after = concat_after
179
- if self.concat_after:
180
- self.concat_linear = nn.Linear(size + size, size)
181
-
182
- def forward(self, x_input, mask, cache=None):
183
- """Compute encoded features.
184
- Args:
185
- x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.
186
- - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].
187
- - w/o pos emb: Tensor (#batch, time, size).
188
- mask (torch.Tensor): Mask tensor for the input (#batch, time).
189
- cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).
190
- Returns:
191
- torch.Tensor: Output tensor (#batch, time, size).
192
- torch.Tensor: Mask tensor (#batch, time).
193
- """
194
- if isinstance(x_input, tuple):
195
- x, pos_emb = x_input[0], x_input[1]
196
- else:
197
- x, pos_emb = x_input, None
198
-
199
- # whether to use macaron style
200
- if self.feed_forward_macaron is not None:
201
- residual = x
202
- if self.normalize_before:
203
- x = self.norm_ff_macaron(x)
204
- x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x))
205
- if not self.normalize_before:
206
- x = self.norm_ff_macaron(x)
207
-
208
- # multi-headed self-attention module
209
- residual = x
210
- if self.normalize_before:
211
- x = self.norm_mha(x)
212
-
213
- if cache is None:
214
- x_q = x
215
- else:
216
- assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
217
- x_q = x[:, -1:, :]
218
- residual = residual[:, -1:, :]
219
- mask = None if mask is None else mask[:, -1:, :]
220
-
221
- if pos_emb is not None:
222
- x_att = self.self_attn(x_q, x, x, pos_emb, mask)
223
- else:
224
- x_att = self.self_attn(x_q, x, x, mask)
225
-
226
- if self.concat_after:
227
- x_concat = torch.cat((x, x_att), dim=-1)
228
- x = residual + self.concat_linear(x_concat)
229
- else:
230
- x = residual + self.dropout(x_att)
231
- if not self.normalize_before:
232
- x = self.norm_mha(x)
233
-
234
- # convolution module
235
- if self.conv_module is not None:
236
- residual = x
237
- if self.normalize_before:
238
- x = self.norm_conv(x)
239
- x = residual + self.dropout(self.conv_module(x))
240
- if not self.normalize_before:
241
- x = self.norm_conv(x)
242
-
243
- # feed forward module
244
- residual = x
245
- if self.normalize_before:
246
- x = self.norm_ff(x)
247
- x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
248
- if not self.normalize_before:
249
- x = self.norm_ff(x)
250
-
251
- if self.conv_module is not None:
252
- x = self.norm_final(x)
253
-
254
- if cache is not None:
255
- x = torch.cat([cache, x], dim=1)
256
-
257
- if pos_emb is not None:
258
- return (x, pos_emb), mask
259
-
260
- return x, mask
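
A minimal usage sketch for the ConvolutionModule above, assuming PyTorch is installed and the class is importable from this file (the `layers` import path is hypothetical):

    import torch
    from layers import ConvolutionModule  # hypothetical import path

    conv = ConvolutionModule(channels=256, kernel_size=31)  # kernel size must be odd
    x = torch.randn(4, 100, 256)   # (batch, time, channels)
    y = conv(x)                    # the module preserves the input shape
    assert y.shape == x.shape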
 
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/updater/__init__.py DELETED
@@ -1,9 +0,0 @@
1
- from agentverse.registry import Registry
2
-
3
- updater_registry = Registry(name="UpdaterRegistry")
4
-
5
- from .base import BaseUpdater
6
- from .basic import BasicUpdater
7
- from .classroom import ClassroomUpdater
8
- from .sde_team import SdeTeamUpdater
9
- from .pokemon import PokemonUpdater
 
spaces/Ahmadjaved/Genaispeech/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Genaispeech
3
- emoji: 😻
4
- colorFrom: red
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.39.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AlexWang/lama/saicinpainting/training/trainers/base.py DELETED
@@ -1,291 +0,0 @@
1
- import copy
2
- import logging
3
- from typing import Dict, Tuple
4
-
5
- import pandas as pd
6
- import pytorch_lightning as ptl
7
- import torch
8
- import torch.nn as nn
9
- import torch.nn.functional as F
10
- from torch.utils.data import DistributedSampler
11
-
12
- from saicinpainting.evaluation import make_evaluator
13
- from saicinpainting.training.data.datasets import make_default_train_dataloader, make_default_val_dataloader
14
- from saicinpainting.training.losses.adversarial import make_discrim_loss
15
- from saicinpainting.training.losses.perceptual import PerceptualLoss, ResNetPL
16
- from saicinpainting.training.modules import make_generator, make_discriminator
17
- from saicinpainting.training.visualizers import make_visualizer
18
- from saicinpainting.utils import add_prefix_to_keys, average_dicts, set_requires_grad, flatten_dict, \
19
- get_has_ddp_rank
20
-
21
- LOGGER = logging.getLogger(__name__)
22
-
23
-
24
- def make_optimizer(parameters, kind='adamw', **kwargs):
25
- if kind == 'adam':
26
- optimizer_class = torch.optim.Adam
27
- elif kind == 'adamw':
28
- optimizer_class = torch.optim.AdamW
29
- else:
30
- raise ValueError(f'Unknown optimizer kind {kind}')
31
- return optimizer_class(parameters, **kwargs)
32
-
33
-
34
- def update_running_average(result: nn.Module, new_iterate_model: nn.Module, decay=0.999):
35
- with torch.no_grad():
36
- res_params = dict(result.named_parameters())
37
- new_params = dict(new_iterate_model.named_parameters())
38
-
39
- for k in res_params.keys():
40
- res_params[k].data.mul_(decay).add_(new_params[k].data, alpha=1 - decay)
41
-
42
-
43
- def make_multiscale_noise(base_tensor, scales=6, scale_mode='bilinear'):
44
- batch_size, _, height, width = base_tensor.shape
45
- cur_height, cur_width = height, width
46
- result = []
47
- align_corners = False if scale_mode in ('bilinear', 'bicubic') else None
48
- for _ in range(scales):
49
- cur_sample = torch.randn(batch_size, 1, cur_height, cur_width, device=base_tensor.device)
50
- cur_sample_scaled = F.interpolate(cur_sample, size=(height, width), mode=scale_mode, align_corners=align_corners)
51
- result.append(cur_sample_scaled)
52
- cur_height //= 2
53
- cur_width //= 2
54
- return torch.cat(result, dim=1)
55
-
56
-
57
- class BaseInpaintingTrainingModule(ptl.LightningModule):
58
- def __init__(self, config, use_ddp, *args, predict_only=False, visualize_each_iters=100,
59
- average_generator=False, generator_avg_beta=0.999, average_generator_start_step=30000,
60
- average_generator_period=10, store_discr_outputs_for_vis=False,
61
- **kwargs):
62
- super().__init__(*args, **kwargs)
63
- LOGGER.info('BaseInpaintingTrainingModule init called')
64
-
65
- self.config = config
66
-
67
- self.generator = make_generator(config, **self.config.generator)
68
- self.use_ddp = use_ddp
69
-
70
- if not get_has_ddp_rank():
71
- LOGGER.info(f'Generator\n{self.generator}')
72
-
73
- if not predict_only:
74
- self.save_hyperparameters(self.config)
75
- self.discriminator = make_discriminator(**self.config.discriminator)
76
- self.adversarial_loss = make_discrim_loss(**self.config.losses.adversarial)
77
- self.visualizer = make_visualizer(**self.config.visualizer)
78
- self.val_evaluator = make_evaluator(**self.config.evaluator)
79
- self.test_evaluator = make_evaluator(**self.config.evaluator)
80
-
81
- if not get_has_ddp_rank():
82
- LOGGER.info(f'Discriminator\n{self.discriminator}')
83
-
84
- extra_val = self.config.data.get('extra_val', ())
85
- if extra_val:
86
- self.extra_val_titles = list(extra_val)
87
- self.extra_evaluators = nn.ModuleDict({k: make_evaluator(**self.config.evaluator)
88
- for k in extra_val})
89
- else:
90
- self.extra_evaluators = {}
91
-
92
- self.average_generator = average_generator
93
- self.generator_avg_beta = generator_avg_beta
94
- self.average_generator_start_step = average_generator_start_step
95
- self.average_generator_period = average_generator_period
96
- self.generator_average = None
97
- self.last_generator_averaging_step = -1
98
- self.store_discr_outputs_for_vis = store_discr_outputs_for_vis
99
-
100
- if self.config.losses.get("l1", {"weight_known": 0})['weight_known'] > 0:
101
- self.loss_l1 = nn.L1Loss(reduction='none')
102
-
103
- if self.config.losses.get("mse", {"weight": 0})['weight'] > 0:
104
- self.loss_mse = nn.MSELoss(reduction='none')
105
-
106
- if self.config.losses.perceptual.weight > 0:
107
- self.loss_pl = PerceptualLoss()
108
-
109
- if self.config.losses.get("resnet_pl", {"weight": 0})['weight'] > 0:
110
- self.loss_resnet_pl = ResNetPL(**self.config.losses.resnet_pl)
111
- else:
112
- self.loss_resnet_pl = None
113
-
114
- self.visualize_each_iters = visualize_each_iters
115
- LOGGER.info('BaseInpaintingTrainingModule init done')
116
-
117
- def configure_optimizers(self):
118
- discriminator_params = list(self.discriminator.parameters())
119
- return [
120
- dict(optimizer=make_optimizer(self.generator.parameters(), **self.config.optimizers.generator)),
121
- dict(optimizer=make_optimizer(discriminator_params, **self.config.optimizers.discriminator)),
122
- ]
123
-
124
- def train_dataloader(self):
125
- kwargs = dict(self.config.data.train)
126
- if self.use_ddp:
127
- kwargs['ddp_kwargs'] = dict(num_replicas=self.trainer.num_nodes * self.trainer.num_processes,
128
- rank=self.trainer.global_rank,
129
- shuffle=True)
130
-         dataloader = make_default_train_dataloader(**kwargs)  # use the ddp-aware kwargs built above
131
- return dataloader
132
-
133
- def val_dataloader(self):
134
- res = [make_default_val_dataloader(**self.config.data.val)]
135
-
136
- if self.config.data.visual_test is not None:
137
- res = res + [make_default_val_dataloader(**self.config.data.visual_test)]
138
- else:
139
- res = res + res
140
-
141
- extra_val = self.config.data.get('extra_val', ())
142
- if extra_val:
143
- res += [make_default_val_dataloader(**extra_val[k]) for k in self.extra_val_titles]
144
-
145
- return res
146
-
147
- def training_step(self, batch, batch_idx, optimizer_idx=None):
148
- self._is_training_step = True
149
- return self._do_step(batch, batch_idx, mode='train', optimizer_idx=optimizer_idx)
150
-
151
- def validation_step(self, batch, batch_idx, dataloader_idx):
152
- extra_val_key = None
153
- if dataloader_idx == 0:
154
- mode = 'val'
155
- elif dataloader_idx == 1:
156
- mode = 'test'
157
- else:
158
- mode = 'extra_val'
159
- extra_val_key = self.extra_val_titles[dataloader_idx - 2]
160
- self._is_training_step = False
161
- return self._do_step(batch, batch_idx, mode=mode, extra_val_key=extra_val_key)
162
-
163
- def training_step_end(self, batch_parts_outputs):
164
- if self.training and self.average_generator \
165
- and self.global_step >= self.average_generator_start_step \
166
- and self.global_step >= self.last_generator_averaging_step + self.average_generator_period:
167
- if self.generator_average is None:
168
- self.generator_average = copy.deepcopy(self.generator)
169
- else:
170
- update_running_average(self.generator_average, self.generator, decay=self.generator_avg_beta)
171
- self.last_generator_averaging_step = self.global_step
172
-
173
- full_loss = (batch_parts_outputs['loss'].mean()
174
- if torch.is_tensor(batch_parts_outputs['loss']) # loss is not tensor when no discriminator used
175
- else torch.tensor(batch_parts_outputs['loss']).float().requires_grad_(True))
176
- log_info = {k: v.mean() for k, v in batch_parts_outputs['log_info'].items()}
177
- self.log_dict(log_info, on_step=True, on_epoch=False)
178
- return full_loss
179
-
180
- def validation_epoch_end(self, outputs):
181
- outputs = [step_out for out_group in outputs for step_out in out_group]
182
- averaged_logs = average_dicts(step_out['log_info'] for step_out in outputs)
183
- self.log_dict({k: v.mean() for k, v in averaged_logs.items()})
184
-
185
- pd.set_option('display.max_columns', 500)
186
- pd.set_option('display.width', 1000)
187
-
188
- # standard validation
189
- val_evaluator_states = [s['val_evaluator_state'] for s in outputs if 'val_evaluator_state' in s]
190
- val_evaluator_res = self.val_evaluator.evaluation_end(states=val_evaluator_states)
191
- val_evaluator_res_df = pd.DataFrame(val_evaluator_res).stack(1).unstack(0)
192
- val_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
193
- LOGGER.info(f'Validation metrics after epoch #{self.current_epoch}, '
194
- f'total {self.global_step} iterations:\n{val_evaluator_res_df}')
195
-
196
- for k, v in flatten_dict(val_evaluator_res).items():
197
- self.log(f'val_{k}', v)
198
-
199
- # standard visual test
200
- test_evaluator_states = [s['test_evaluator_state'] for s in outputs
201
- if 'test_evaluator_state' in s]
202
- test_evaluator_res = self.test_evaluator.evaluation_end(states=test_evaluator_states)
203
- test_evaluator_res_df = pd.DataFrame(test_evaluator_res).stack(1).unstack(0)
204
- test_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
205
- LOGGER.info(f'Test metrics after epoch #{self.current_epoch}, '
206
- f'total {self.global_step} iterations:\n{test_evaluator_res_df}')
207
-
208
- for k, v in flatten_dict(test_evaluator_res).items():
209
- self.log(f'test_{k}', v)
210
-
211
- # extra validations
212
- if self.extra_evaluators:
213
- for cur_eval_title, cur_evaluator in self.extra_evaluators.items():
214
- cur_state_key = f'extra_val_{cur_eval_title}_evaluator_state'
215
- cur_states = [s[cur_state_key] for s in outputs if cur_state_key in s]
216
- cur_evaluator_res = cur_evaluator.evaluation_end(states=cur_states)
217
- cur_evaluator_res_df = pd.DataFrame(cur_evaluator_res).stack(1).unstack(0)
218
- cur_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
219
- LOGGER.info(f'Extra val {cur_eval_title} metrics after epoch #{self.current_epoch}, '
220
- f'total {self.global_step} iterations:\n{cur_evaluator_res_df}')
221
- for k, v in flatten_dict(cur_evaluator_res).items():
222
- self.log(f'extra_val_{cur_eval_title}_{k}', v)
223
-
224
- def _do_step(self, batch, batch_idx, mode='train', optimizer_idx=None, extra_val_key=None):
225
- if optimizer_idx == 0: # step for generator
226
- set_requires_grad(self.generator, True)
227
- set_requires_grad(self.discriminator, False)
228
- elif optimizer_idx == 1: # step for discriminator
229
- set_requires_grad(self.generator, False)
230
- set_requires_grad(self.discriminator, True)
231
-
232
- batch = self(batch)
233
-
234
- total_loss = 0
235
- metrics = {}
236
-
237
- if optimizer_idx is None or optimizer_idx == 0: # step for generator
238
- total_loss, metrics = self.generator_loss(batch)
239
-
240
- elif optimizer_idx is None or optimizer_idx == 1: # step for discriminator
241
- if self.config.losses.adversarial.weight > 0:
242
- total_loss, metrics = self.discriminator_loss(batch)
243
-
244
- if self.get_ddp_rank() in (None, 0) and (batch_idx % self.visualize_each_iters == 0 or mode == 'test'):
245
- if self.config.losses.adversarial.weight > 0:
246
- if self.store_discr_outputs_for_vis:
247
- with torch.no_grad():
248
- self.store_discr_outputs(batch)
249
- vis_suffix = f'_{mode}'
250
- if mode == 'extra_val':
251
- vis_suffix += f'_{extra_val_key}'
252
- self.visualizer(self.current_epoch, batch_idx, batch, suffix=vis_suffix)
253
-
254
- metrics_prefix = f'{mode}_'
255
- if mode == 'extra_val':
256
- metrics_prefix += f'{extra_val_key}_'
257
- result = dict(loss=total_loss, log_info=add_prefix_to_keys(metrics, metrics_prefix))
258
- if mode == 'val':
259
- result['val_evaluator_state'] = self.val_evaluator.process_batch(batch)
260
- elif mode == 'test':
261
- result['test_evaluator_state'] = self.test_evaluator.process_batch(batch)
262
- elif mode == 'extra_val':
263
- result[f'extra_val_{extra_val_key}_evaluator_state'] = self.extra_evaluators[extra_val_key].process_batch(batch)
264
-
265
- return result
266
-
267
- def get_current_generator(self, no_average=False):
268
- if not no_average and not self.training and self.average_generator and self.generator_average is not None:
269
- return self.generator_average
270
- return self.generator
271
-
272
- def forward(self, batch: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
273
- """Pass data through generator and obtain at leas 'predicted_image' and 'inpainted' keys"""
274
- raise NotImplementedError()
275
-
276
- def generator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
277
- raise NotImplementedError()
278
-
279
- def discriminator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
280
- raise NotImplementedError()
281
-
282
- def store_discr_outputs(self, batch):
283
- out_size = batch['image'].shape[2:]
284
- discr_real_out, _ = self.discriminator(batch['image'])
285
- discr_fake_out, _ = self.discriminator(batch['predicted_image'])
286
- batch['discr_output_real'] = F.interpolate(discr_real_out, size=out_size, mode='nearest')
287
- batch['discr_output_fake'] = F.interpolate(discr_fake_out, size=out_size, mode='nearest')
288
- batch['discr_output_diff'] = batch['discr_output_real'] - batch['discr_output_fake']
289
-
290
- def get_ddp_rank(self):
291
- return self.trainer.global_rank if (self.trainer.num_nodes * self.trainer.num_processes) > 1 else None
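
For reference, the exponential moving average performed by `update_running_average` above reduces to the following self-contained sketch (plain PyTorch; the linear model and decay value are illustrative stand-ins for the generator):

    import copy
    import torch
    import torch.nn as nn

    model = nn.Linear(8, 8)            # stand-in for the generator being trained
    avg_model = copy.deepcopy(model)   # running-average copy
    decay = 0.999

    # one averaging step: avg <- decay * avg + (1 - decay) * new
    with torch.no_grad():
        avg_params = dict(avg_model.named_parameters())
        for name, p in model.named_parameters():
            avg_params[name].data.mul_(decay).add_(p.data, alpha=1 - decay)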
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- from .base_bbox_coder import BaseBBoxCoder
2
- from .bucketing_bbox_coder import BucketingBBoxCoder
3
- from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
4
- from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
5
- from .pseudo_bbox_coder import PseudoBBoxCoder
6
- from .tblr_bbox_coder import TBLRBBoxCoder
7
- from .yolo_bbox_coder import YOLOBBoxCoder
8
-
9
- __all__ = [
10
- 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
11
- 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
12
- 'BucketingBBoxCoder'
13
- ]
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py DELETED
@@ -1,172 +0,0 @@
1
- import torch.nn as nn
2
- from mmcv.cnn import ConvModule, normal_init, xavier_init
3
-
4
- from mmdet.models.backbones.resnet import Bottleneck
5
- from mmdet.models.builder import HEADS
6
- from .bbox_head import BBoxHead
7
-
8
-
9
- class BasicResBlock(nn.Module):
10
- """Basic residual block.
11
-
12
- This block is a little different from the block in the ResNet backbone.
13
- The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.
14
-
15
- Args:
16
- in_channels (int): Channels of the input feature map.
17
- out_channels (int): Channels of the output feature map.
18
- conv_cfg (dict): The config dict for convolution layers.
19
- norm_cfg (dict): The config dict for normalization layers.
20
- """
21
-
22
- def __init__(self,
23
- in_channels,
24
- out_channels,
25
- conv_cfg=None,
26
- norm_cfg=dict(type='BN')):
27
- super(BasicResBlock, self).__init__()
28
-
29
- # main path
30
- self.conv1 = ConvModule(
31
- in_channels,
32
- in_channels,
33
- kernel_size=3,
34
- padding=1,
35
- bias=False,
36
- conv_cfg=conv_cfg,
37
- norm_cfg=norm_cfg)
38
- self.conv2 = ConvModule(
39
- in_channels,
40
- out_channels,
41
- kernel_size=1,
42
- bias=False,
43
- conv_cfg=conv_cfg,
44
- norm_cfg=norm_cfg,
45
- act_cfg=None)
46
-
47
- # identity path
48
- self.conv_identity = ConvModule(
49
- in_channels,
50
- out_channels,
51
- kernel_size=1,
52
- conv_cfg=conv_cfg,
53
- norm_cfg=norm_cfg,
54
- act_cfg=None)
55
-
56
- self.relu = nn.ReLU(inplace=True)
57
-
58
- def forward(self, x):
59
- identity = x
60
-
61
- x = self.conv1(x)
62
- x = self.conv2(x)
63
-
64
- identity = self.conv_identity(identity)
65
- out = x + identity
66
-
67
- out = self.relu(out)
68
- return out
69
-
70
-
71
- @HEADS.register_module()
72
- class DoubleConvFCBBoxHead(BBoxHead):
73
- r"""Bbox head used in Double-Head R-CNN
74
-
75
- .. code-block:: none
76
-
77
-                                       /-> cls
78
-                   /-> shared convs ->
79
-                                       \-> reg
80
-     roi features
81
-                                       /-> cls
82
-                   \-> shared fc    ->
83
-                                       \-> reg
84
- """ # noqa: W605
85
-
86
- def __init__(self,
87
- num_convs=0,
88
- num_fcs=0,
89
- conv_out_channels=1024,
90
- fc_out_channels=1024,
91
- conv_cfg=None,
92
- norm_cfg=dict(type='BN'),
93
- **kwargs):
94
- kwargs.setdefault('with_avg_pool', True)
95
- super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
96
- assert self.with_avg_pool
97
- assert num_convs > 0
98
- assert num_fcs > 0
99
- self.num_convs = num_convs
100
- self.num_fcs = num_fcs
101
- self.conv_out_channels = conv_out_channels
102
- self.fc_out_channels = fc_out_channels
103
- self.conv_cfg = conv_cfg
104
- self.norm_cfg = norm_cfg
105
-
106
- # increase the channel of input features
107
- self.res_block = BasicResBlock(self.in_channels,
108
- self.conv_out_channels)
109
-
110
- # add conv heads
111
- self.conv_branch = self._add_conv_branch()
112
- # add fc heads
113
- self.fc_branch = self._add_fc_branch()
114
-
115
- out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
116
- self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
117
-
118
- self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
119
- self.relu = nn.ReLU(inplace=True)
120
-
121
- def _add_conv_branch(self):
122
- """Add the fc branch which consists of a sequential of conv layers."""
123
- branch_convs = nn.ModuleList()
124
- for i in range(self.num_convs):
125
- branch_convs.append(
126
- Bottleneck(
127
- inplanes=self.conv_out_channels,
128
- planes=self.conv_out_channels // 4,
129
- conv_cfg=self.conv_cfg,
130
- norm_cfg=self.norm_cfg))
131
- return branch_convs
132
-
133
- def _add_fc_branch(self):
134
- """Add the fc branch which consists of a sequential of fc layers."""
135
- branch_fcs = nn.ModuleList()
136
- for i in range(self.num_fcs):
137
- fc_in_channels = (
138
- self.in_channels *
139
- self.roi_feat_area if i == 0 else self.fc_out_channels)
140
- branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
141
- return branch_fcs
142
-
143
- def init_weights(self):
144
- # conv layers are already initialized by ConvModule
145
- normal_init(self.fc_cls, std=0.01)
146
- normal_init(self.fc_reg, std=0.001)
147
-
148
- for m in self.fc_branch.modules():
149
- if isinstance(m, nn.Linear):
150
- xavier_init(m, distribution='uniform')
151
-
152
- def forward(self, x_cls, x_reg):
153
- # conv head
154
- x_conv = self.res_block(x_reg)
155
-
156
- for conv in self.conv_branch:
157
- x_conv = conv(x_conv)
158
-
159
- if self.with_avg_pool:
160
- x_conv = self.avg_pool(x_conv)
161
-
162
- x_conv = x_conv.view(x_conv.size(0), -1)
163
- bbox_pred = self.fc_reg(x_conv)
164
-
165
- # fc head
166
- x_fc = x_cls.view(x_cls.size(0), -1)
167
- for fc in self.fc_branch:
168
- x_fc = self.relu(fc(x_fc))
169
-
170
- cls_score = self.fc_cls(x_fc)
171
-
172
- return cls_score, bbox_pred
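
For intuition, a minimal sketch of the double-head split above in plain PyTorch, with no mmdet dependencies (the channel sizes and RoI resolution are illustrative, not taken from any particular config): the conv branch feeds box regression, the fc branch feeds classification.

    import torch
    import torch.nn as nn

    num_classes, roi_feat, in_ch = 80, 7, 256
    conv_branch = nn.Sequential(nn.Conv2d(in_ch, 1024, 1), nn.AdaptiveAvgPool2d(1), nn.Flatten())
    fc_branch = nn.Sequential(nn.Flatten(), nn.Linear(in_ch * roi_feat * roi_feat, 1024), nn.ReLU())
    fc_reg = nn.Linear(1024, 4 * num_classes)   # conv branch -> class-specific box deltas
    fc_cls = nn.Linear(1024, num_classes + 1)   # fc branch -> class scores (+ background)

    rois = torch.randn(8, in_ch, roi_feat, roi_feat)  # 8 RoI feature maps
    bbox_pred = fc_reg(conv_branch(rois))             # shape (8, 320)
    cls_score = fc_cls(fc_branch(rois))               # shape (8, 81)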
 
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py',
3
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
4
- ]
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/gui/ui_draw.py DELETED
@@ -1,189 +0,0 @@
1
- from PyQt5 import QtGui, QtCore, QtWidgets
2
-
3
-
4
- #######################################################################################################
5
- # painter function
6
- #######################################################################################################
7
- class painter(QtWidgets.QWidget):
8
- """the class for a painter"""
9
- def __init__(self, parent, image=None):
10
- super(painter, self).__init__()
11
- if image is None:
12
- w = h = 256
13
- else:
14
- w, h = image.size().width(), image.size().height()
15
- self.ParentLink = parent
16
- self.setPalette(QtGui.QPalette(QtCore.Qt.white))
17
- self.setAutoFillBackground(True)
18
- self.setMaximumSize(w, h)
19
- self.map = QtGui.QImage(w, h, QtGui.QImage.Format_RGB32)
20
- self.map.fill(QtCore.Qt.black)
21
- self.image = image
22
- self.shape = self.ParentLink.shape
23
- self.CurrentWidth = self.ParentLink.CurrentWidth
24
- self.MouseLoc = point(0, 0)
25
- self.LastPos = point(0, 0)
26
- self.Brush = False
27
- self.DrawingShapes_free = shapes()
28
- self.DrawingShapes_rec = shapes()
29
- self.IsPainting = False
30
- self.IsEraseing = False
31
- self.iteration = 0
32
-
33
- self.CurrentColor = colour3(255, 255, 255)
34
-
35
- self.ShapeNum = 0
36
- self.IsMouseing = False
37
- self.PaintPanel = 0
38
-
39
- def drawLines(self, painter):
40
- """draw free-form masks"""
41
- painter.setRenderHint(QtGui.QPainter.Antialiasing)
42
- for i in range(self.DrawingShapes_free.NumberOfShapes()-1):
43
- T = self.DrawingShapes_free.GetShape(i)
44
- T1 = self.DrawingShapes_free.GetShape(i + 1)
45
-
46
- if T.ShapeNumber == T1.ShapeNumber:
47
- pen = QtGui.QPen(QtGui.QColor(T.Color.R, T.Color.G, T.Color.B), T.Width / 2, QtCore.Qt.SolidLine)
48
- painter.setPen(pen)
49
- painter.drawLine(T.Location.X, T.Location.Y, T1.Location.X, T1.Location.Y)
50
-
51
- def drawRectangle(self, painter):
52
- """draw rectangle mask"""
53
- painter.setRenderHint(QtGui.QPainter.Antialiasing)
54
- for i in range(self.DrawingShapes_rec.NumberOfShapes()-1):
55
- T = self.DrawingShapes_rec.GetShape(i)
56
- T1 = self.DrawingShapes_rec.GetShape(i+1)
57
-
58
- if T.ShapeNumber == T1.ShapeNumber:
59
- pen = QtGui.QPen(QtGui.QColor(T.Color.R, T.Color.G, T.Color.B), T.Width/2, QtCore.Qt.SolidLine)
60
- painter.setPen(pen)
61
- painter.setBrush(QtGui.QColor(T.Color.R, T.Color.G, T.Color.B))
62
- painter.drawRects(QtCore.QRect(QtCore.QPoint(T.Location.X, T.Location.Y), QtCore.QPoint(T1.Location.X, T1.Location.Y)))
63
-
64
- def saveDraw(self):
65
- """save the painted masks"""
66
- painter = QtGui.QPainter()
67
- painter.begin(self.map)
68
- if self.shape == 'line':
69
- self.drawLines(painter)
70
- if self.shape == 'rectangle':
71
- self.drawRectangle(painter)
72
- painter.end()
73
-
74
- def mousePressEvent(self, event):
75
- """mouse down event for the drawing"""
76
- if self.Brush:
77
- self.IsPainting = True
78
- self.ShapeNum += 1
79
- if self.shape == 'rectangle':
80
- self.DrawingShapes_rec.NewShape(point(event.x(), event.y()), self.CurrentWidth, self.CurrentColor, self.ShapeNum)
81
- else:
82
- self.LastPos = point(0, 0)
83
- else:
84
- self.IsEraseing = True
85
- if self.shape == 'rectangle':
86
- self.DrawingShapes_rec.NewShape(point(event.x(), event.y()), self.CurrentWidth, self.CurrentColor, self.ShapeNum)
87
-
88
- def mouseMoveEvent(self, event):
89
- """mouse move event to record the track"""
90
- if self.IsPainting:
91
- self.MouseLoc = point(event.x(), event.y())
92
- if self.LastPos.X != self.MouseLoc.X or self.LastPos.Y != self.MouseLoc.Y:
93
- self.LastPos = point(event.x(), event.y())
94
- if self.shape == 'line':
95
- self.DrawingShapes_free.NewShape(self.LastPos, self.CurrentWidth, self.CurrentColor, self.ShapeNum)
96
- self.repaint()
97
- if self.IsEraseing:
98
- self.MouseLoc = point(event.x(), event.y())
99
- if self.shape == 'line':
100
- self.DrawingShapes_free.RemoveShape(self.MouseLoc, 10)
101
- elif self.shape == 'rectangle':
102
- self.DrawingShapes_rec.RemoveShape(self.MouseLoc, 10)
103
- self.repaint()
104
-
105
- def mouseReleaseEvent(self, event):
106
- """mouse up event"""
107
- if self.IsEraseing:
108
- self.IsEraseing = False
109
- self.repaint()
110
- elif self.shape == 'rectangle':
111
- self.DrawingShapes_rec.NewShape(point(event.x(), event.y()), self.CurrentWidth, self.CurrentColor, self.ShapeNum)
112
- self.repaint()
113
-
114
- def paintEvent(self, event):
115
- painter = QtGui.QPainter()
116
- painter.begin(self)
117
-         if self.image is not None:
118
- painter.drawImage(0, 0, self.image)
119
- if self.shape == 'line':
120
- self.drawLines(painter)
121
- if self.shape == 'rectangle':
122
- self.drawRectangle(painter)
123
- painter.end()
124
- self.iteration = 0
125
-
126
-
127
- #######################################################################################################
128
- # base drawing function
129
- #######################################################################################################
130
- class colour3:
131
- """define the colour plane for the drawing"""
132
- def __init__(self, nR=0, nG=0, nB=0):
133
- self.R = nR
134
- self.G = nG
135
- self.B = nB
136
-
137
-
138
- class point():
139
- """define the location"""
140
- def __init__(self, nX=0, nY=0):
141
- self.X = nX
142
- self.Y = nY
143
-
144
- def Set(self, nX, nY):
145
- self.X = nX
146
- self.Y = nY
147
-
148
-
149
- class shape():
150
- """define the painter shape"""
151
- def __init__(self, location=point(0,0), width=1, color=colour3(255, 255, 255), number=0):
152
- self.Location = location
153
- self.Width = width
154
- self.Color = color
155
- self.ShapeNumber = number
156
-
157
-
158
- class shapes():
159
- """a set of shape"""
160
- def __init__(self):
161
- self.shapes = []
162
-
163
- def NumberOfShapes(self):
164
- return len(self.shapes)
165
-
166
- def NewShape(self, location=point(0,0), width=1, color=colour3(255,255,255), number=0):
167
- Sh = shape(location, width, color, number)
168
- self.shapes.append(Sh)
169
-
170
- def GetShape(self, Index):
171
- return self.shapes[Index]
172
-
173
- def RemoveShape(self, L, threshold):
174
- i = 0
175
- while True:
176
- if (i == len(self.shapes)):
177
- break
178
- # Finds if a point is within a certain distance of the point to remove.
179
- if ((abs(L.X - self.shapes[i].Location.X) < threshold) and (
180
- abs(L.Y - self.shapes[i].Location.Y) < threshold)):
181
- # removes all data for that number
182
- del self.shapes[i]
183
- # goes through the rest of the data and adds an extra
184
-                 # 1 to mark them as a separate shape, shifting the numbering along.
185
- for n in range(len(self.shapes) - i):
186
- self.shapes[n + i].ShapeNumber += 1
187
-                 # Go back a step so we don't miss a point.
188
- i -= 1
189
- i += 1
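
A quick sketch of how the shape-tracking classes above fit together (the classes themselves use no Qt, though importing the module requires PyQt5 to be installed; the import path is hypothetical):

    from ui_draw import shapes, point, colour3  # hypothetical import path

    strokes = shapes()
    strokes.NewShape(point(10, 10), width=4, color=colour3(255, 255, 255), number=1)
    strokes.NewShape(point(12, 14), width=4, color=colour3(255, 255, 255), number=1)
    strokes.RemoveShape(point(11, 12), threshold=10)  # erases every point near (11, 12)
    print(strokes.NumberOfShapes())                   # both points fell within the threshold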
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/blocks.py DELETED
@@ -1,342 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
-
4
- from .vit import (
5
- _make_pretrained_vitb_rn50_384,
6
- _make_pretrained_vitl16_384,
7
- _make_pretrained_vitb16_384,
8
- forward_vit,
9
- )
10
-
11
- def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
12
- if backbone == "vitl16_384":
13
- pretrained = _make_pretrained_vitl16_384(
14
- use_pretrained, hooks=hooks, use_readout=use_readout
15
- )
16
- scratch = _make_scratch(
17
- [256, 512, 1024, 1024], features, groups=groups, expand=expand
18
- ) # ViT-L/16 - 85.0% Top1 (backbone)
19
- elif backbone == "vitb_rn50_384":
20
- pretrained = _make_pretrained_vitb_rn50_384(
21
- use_pretrained,
22
- hooks=hooks,
23
- use_vit_only=use_vit_only,
24
- use_readout=use_readout,
25
- )
26
- scratch = _make_scratch(
27
- [256, 512, 768, 768], features, groups=groups, expand=expand
28
- ) # ViT-H/16 - 85.0% Top1 (backbone)
29
- elif backbone == "vitb16_384":
30
- pretrained = _make_pretrained_vitb16_384(
31
- use_pretrained, hooks=hooks, use_readout=use_readout
32
- )
33
- scratch = _make_scratch(
34
- [96, 192, 384, 768], features, groups=groups, expand=expand
35
- ) # ViT-B/16 - 84.6% Top1 (backbone)
36
- elif backbone == "resnext101_wsl":
37
- pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
38
-         scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand)  # resnext101_wsl
39
- elif backbone == "efficientnet_lite3":
40
- pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
41
- scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3
42
- else:
43
- print(f"Backbone '{backbone}' not implemented")
44
- assert False
45
-
46
- return pretrained, scratch
47
-
48
-
49
- def _make_scratch(in_shape, out_shape, groups=1, expand=False):
50
- scratch = nn.Module()
51
-
52
- out_shape1 = out_shape
53
- out_shape2 = out_shape
54
- out_shape3 = out_shape
55
- out_shape4 = out_shape
56
- if expand==True:
57
- out_shape1 = out_shape
58
- out_shape2 = out_shape*2
59
- out_shape3 = out_shape*4
60
- out_shape4 = out_shape*8
61
-
62
- scratch.layer1_rn = nn.Conv2d(
63
- in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
64
- )
65
- scratch.layer2_rn = nn.Conv2d(
66
- in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
67
- )
68
- scratch.layer3_rn = nn.Conv2d(
69
- in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
70
- )
71
- scratch.layer4_rn = nn.Conv2d(
72
- in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
73
- )
74
-
75
- return scratch
76
-
77
-
78
- def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
79
- efficientnet = torch.hub.load(
80
- "rwightman/gen-efficientnet-pytorch",
81
- "tf_efficientnet_lite3",
82
- pretrained=use_pretrained,
83
- exportable=exportable
84
- )
85
- return _make_efficientnet_backbone(efficientnet)
86
-
87
-
88
- def _make_efficientnet_backbone(effnet):
89
- pretrained = nn.Module()
90
-
91
- pretrained.layer1 = nn.Sequential(
92
- effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
93
- )
94
- pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
95
- pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
96
- pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
97
-
98
- return pretrained
99
-
100
-
101
- def _make_resnet_backbone(resnet):
102
- pretrained = nn.Module()
103
- pretrained.layer1 = nn.Sequential(
104
- resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
105
- )
106
-
107
- pretrained.layer2 = resnet.layer2
108
- pretrained.layer3 = resnet.layer3
109
- pretrained.layer4 = resnet.layer4
110
-
111
- return pretrained
112
-
113
-
114
- def _make_pretrained_resnext101_wsl(use_pretrained):
115
- resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
116
- return _make_resnet_backbone(resnet)
117
-
118
-
119
-
120
- class Interpolate(nn.Module):
121
- """Interpolation module.
122
- """
123
-
124
- def __init__(self, scale_factor, mode, align_corners=False):
125
- """Init.
126
-
127
- Args:
128
- scale_factor (float): scaling
129
- mode (str): interpolation mode
130
- """
131
- super(Interpolate, self).__init__()
132
-
133
- self.interp = nn.functional.interpolate
134
- self.scale_factor = scale_factor
135
- self.mode = mode
136
- self.align_corners = align_corners
137
-
138
- def forward(self, x):
139
- """Forward pass.
140
-
141
- Args:
142
- x (tensor): input
143
-
144
- Returns:
145
- tensor: interpolated data
146
- """
147
-
148
- x = self.interp(
149
-             x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
-         )
-
-         return x
-
-
- class ResidualConvUnit(nn.Module):
-     """Residual convolution module.
-     """
-
-     def __init__(self, features):
-         """Init.
-
-         Args:
-             features (int): number of features
-         """
-         super().__init__()
-
-         self.conv1 = nn.Conv2d(
-             features, features, kernel_size=3, stride=1, padding=1, bias=True
-         )
-
-         self.conv2 = nn.Conv2d(
-             features, features, kernel_size=3, stride=1, padding=1, bias=True
-         )
-
-         self.relu = nn.ReLU(inplace=True)
-
-     def forward(self, x):
-         """Forward pass.
-
-         Args:
-             x (tensor): input
-
-         Returns:
-             tensor: output
-         """
-         out = self.relu(x)
-         out = self.conv1(out)
-         out = self.relu(out)
-         out = self.conv2(out)
-
-         return out + x
-
-
- class FeatureFusionBlock(nn.Module):
-     """Feature fusion block.
-     """
-
-     def __init__(self, features):
-         """Init.
-
-         Args:
-             features (int): number of features
-         """
-         super(FeatureFusionBlock, self).__init__()
-
-         self.resConfUnit1 = ResidualConvUnit(features)
-         self.resConfUnit2 = ResidualConvUnit(features)
-
-     def forward(self, *xs):
-         """Forward pass.
-
-         Returns:
-             tensor: output
-         """
-         output = xs[0]
-
-         if len(xs) == 2:
-             output += self.resConfUnit1(xs[1])
-
-         output = self.resConfUnit2(output)
-
-         output = nn.functional.interpolate(
-             output, scale_factor=2, mode="bilinear", align_corners=True
-         )
-
-         return output
-
-
- class ResidualConvUnit_custom(nn.Module):
-     """Residual convolution module.
-     """
-
-     def __init__(self, features, activation, bn):
-         """Init.
-
-         Args:
-             features (int): number of features
-         """
-         super().__init__()
-
-         self.bn = bn
-
-         self.groups = 1
-
-         self.conv1 = nn.Conv2d(
-             features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
-         )
-
-         self.conv2 = nn.Conv2d(
-             features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
-         )
-
-         if self.bn == True:
-             self.bn1 = nn.BatchNorm2d(features)
-             self.bn2 = nn.BatchNorm2d(features)
-
-         self.activation = activation
-
-         self.skip_add = nn.quantized.FloatFunctional()
-
-     def forward(self, x):
-         """Forward pass.
-
-         Args:
-             x (tensor): input
-
-         Returns:
-             tensor: output
-         """
-
-         out = self.activation(x)
-         out = self.conv1(out)
-         if self.bn == True:
-             out = self.bn1(out)
-
-         out = self.activation(out)
-         out = self.conv2(out)
-         if self.bn == True:
-             out = self.bn2(out)
-
-         if self.groups > 1:
-             out = self.conv_merge(out)
-
-         return self.skip_add.add(out, x)
-
-         # return out + x
-
-
- class FeatureFusionBlock_custom(nn.Module):
-     """Feature fusion block.
-     """
-
-     def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
-         """Init.
-
-         Args:
-             features (int): number of features
-         """
-         super(FeatureFusionBlock_custom, self).__init__()
-
-         self.deconv = deconv
-         self.align_corners = align_corners
-
-         self.groups = 1
-
-         self.expand = expand
-         out_features = features
-         if self.expand == True:
-             out_features = features // 2
-
-         self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
-
-         self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
-         self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
-
-         self.skip_add = nn.quantized.FloatFunctional()
-
-     def forward(self, *xs):
-         """Forward pass.
-
-         Returns:
-             tensor: output
-         """
-         output = xs[0]
-
-         if len(xs) == 2:
-             res = self.resConfUnit1(xs[1])
-             output = self.skip_add.add(output, res)
-             # output += res
-
-         output = self.resConfUnit2(output)
-
-         output = nn.functional.interpolate(
-             output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
-         )
-
-         output = self.out_conv(output)
-
-         return output
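
For orientation, here is a minimal sketch (not part of the commit above) of how FeatureFusionBlock_custom is typically driven in a decoder. The feature width, input shapes, and the nn.ReLU activation are illustrative assumptions, not anything the deleted file prescribes, and it assumes the classes above are importable:

import torch
import torch.nn as nn

# Fuse an encoder skip connection into a coarser decoder feature map.
# The block upsamples by 2x, so an 8x8 input comes out 16x16.
block = FeatureFusionBlock_custom(features=64, activation=nn.ReLU(False), bn=False)
coarse = torch.randn(1, 64, 8, 8)  # current decoder state
skip = torch.randn(1, 64, 8, 8)    # skip feature from the encoder
fused = block(coarse, skip)
print(fused.shape)  # torch.Size([1, 64, 16, 16])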
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/config.py DELETED
@@ -1,38 +0,0 @@
- _base_ = [
-     '../../configs/_base_/models/upernet_uniformer.py',
-     '../../configs/_base_/datasets/ade20k.py',
-     '../../configs/_base_/default_runtime.py',
-     '../../configs/_base_/schedules/schedule_160k.py'
- ]
- model = dict(
-     backbone=dict(
-         type='UniFormer',
-         embed_dim=[64, 128, 320, 512],
-         layers=[3, 4, 8, 3],
-         head_dim=64,
-         drop_path_rate=0.25,
-         windows=False,
-         hybrid=False
-     ),
-     decode_head=dict(
-         in_channels=[64, 128, 320, 512],
-         num_classes=150
-     ),
-     auxiliary_head=dict(
-         in_channels=320,
-         num_classes=150
-     ))
-
- # AdamW optimizer, no weight decay for position embedding & layer norm in backbone
- optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
-                  paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
-                                                  'relative_position_bias_table': dict(decay_mult=0.),
-                                                  'norm': dict(decay_mult=0.)}))
-
- lr_config = dict(_delete_=True, policy='poly',
-                  warmup='linear',
-                  warmup_iters=1500,
-                  warmup_ratio=1e-6,
-                  power=1.0, min_lr=0.0, by_epoch=False)
-
- data = dict(samples_per_gpu=2)
 
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/midas/midas_net_custom.py DELETED
@@ -1,128 +0,0 @@
- """MidasNet: Network for monocular depth estimation trained by mixing several datasets.
- This file contains code that is adapted from
- https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
- """
- import torch
- import torch.nn as nn
-
- from .base_model import BaseModel
- from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
-
-
- class MidasNet_small(BaseModel):
-     """Network for monocular depth estimation.
-     """
-
-     def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True,
-                  channels_last=False, align_corners=True, blocks={'expand': True}):
-         """Init.
-
-         Args:
-             path (str, optional): Path to saved model. Defaults to None.
-             features (int, optional): Number of features. Defaults to 64.
-             backbone (str, optional): Backbone network for encoder. Defaults to efficientnet_lite3.
-         """
-         print("Loading weights: ", path)
-
-         super(MidasNet_small, self).__init__()
-
-         use_pretrained = False if path else True
-
-         self.channels_last = channels_last
-         self.blocks = blocks
-         self.backbone = backbone
-
-         self.groups = 1
-
-         features1 = features
-         features2 = features
-         features3 = features
-         features4 = features
-         self.expand = False
-         if "expand" in self.blocks and self.blocks['expand'] == True:
-             self.expand = True
-             features1 = features
-             features2 = features * 2
-             features3 = features * 4
-             features4 = features * 8
-
-         self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups,
-                                                       expand=self.expand, exportable=exportable)
-
-         self.scratch.activation = nn.ReLU(False)
-
-         self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
-         self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
-         self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
-         self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
-
-         self.scratch.output_conv = nn.Sequential(
-             nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1, groups=self.groups),
-             Interpolate(scale_factor=2, mode="bilinear"),
-             nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
-             self.scratch.activation,
-             nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
-             nn.ReLU(True) if non_negative else nn.Identity(),
-             nn.Identity(),
-         )
-
-         if path:
-             self.load(path)
-
-     def forward(self, x):
-         """Forward pass.
-
-         Args:
-             x (tensor): input data (image)
-
-         Returns:
-             tensor: depth
-         """
-         if self.channels_last == True:
-             print("self.channels_last = ", self.channels_last)
-             # .contiguous() is not in-place, so the result must be reassigned
-             x = x.contiguous(memory_format=torch.channels_last)
-
-         layer_1 = self.pretrained.layer1(x)
-         layer_2 = self.pretrained.layer2(layer_1)
-         layer_3 = self.pretrained.layer3(layer_2)
-         layer_4 = self.pretrained.layer4(layer_3)
-
-         layer_1_rn = self.scratch.layer1_rn(layer_1)
-         layer_2_rn = self.scratch.layer2_rn(layer_2)
-         layer_3_rn = self.scratch.layer3_rn(layer_3)
-         layer_4_rn = self.scratch.layer4_rn(layer_4)
-
-         path_4 = self.scratch.refinenet4(layer_4_rn)
-         path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
-         path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
-         path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
-         out = self.scratch.output_conv(path_1)
-
-         return torch.squeeze(out, dim=1)
-
-
- def fuse_model(m):
-     """Fuse Conv2d + BatchNorm2d (+ ReLU) sequences in-place to prepare for quantization."""
-     prev_previous_type = nn.Identity()
-     prev_previous_name = ''
-     previous_type = nn.Identity()
-     previous_name = ''
-     for name, module in m.named_modules():
-         if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
-             # print("FUSED ", prev_previous_name, previous_name, name)
-             torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
-         elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
-             # print("FUSED ", prev_previous_name, previous_name)
-             torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
-         # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
-         #     print("FUSED ", previous_name, name)
-         #     torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
-
-         prev_previous_type = previous_type
-         prev_previous_name = previous_name
-         previous_type = type(module)
-         previous_name = name
 
spaces/Anthony7906/MengHuiMXD_GPT/chatgpt - macOS.command DELETED
@@ -1,7 +0,0 @@
- #!/bin/bash
- echo "Opening ChuanhuChatGPT..."
- cd "$(dirname "${BASH_SOURCE[0]}")"
- nohup python3 ChuanhuChatbot.py >/dev/null 2>&1 &
- sleep 5
- open http://127.0.0.1:7860
- echo "Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). To stop ChuanhuChatbot, run \"pkill -f 'ChuanhuChatbot'\" in a terminal."
 
spaces/AnthonyTruchetPoC/persistent-docker/src/apps/streamlit_demo.py DELETED
@@ -1,62 +0,0 @@
- import os
- from pathlib import Path
-
- import numpy as np
- import pandas as pd
- import streamlit as st
-
- from athai.data_utils import cached_download_csv
-
-
- st.title("Uber pickups in NYC")
-
- DATE_COLUMN = "date/time"
- DATA_URL = (
-     "https://s3-us-west-2.amazonaws.com/"
-     "streamlit-demo-data/uber-raw-data-sep14.csv.gz"
- )
-
- DATA_PATH = Path(os.environ.get("APP_DATA"))
-
-
- @st.cache_resource
- def load_data(nrows):
-     data = cached_download_csv(DATA_PATH, DATA_URL, nrows=nrows)
-
-     def lowercase(x):
-         return str(x).lower()
-
-     data.rename(lowercase, axis="columns", inplace=True)
-     data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])
-     return data
-
-
- data_load_state = st.text("Loading data...")
- data = load_data(10000)
- data_load_state.text("Done! (using st.cache)")
-
- if st.checkbox("Show raw data"):
-     st.subheader("Raw data")
-     st.write(data)
-
- st.subheader("Number of pickups by hour")
- hist_values = np.histogram(data[DATE_COLUMN].dt.hour, bins=24, range=(0, 24))[0]
- st.bar_chart(hist_values)
-
- # Some number in the range 0-23
- hour_to_filter = st.slider("hour", 0, 23, 17)
- filtered_data = data[data[DATE_COLUMN].dt.hour == hour_to_filter]
-
- st.subheader("Map of all pickups at %s:00" % hour_to_filter)
- st.map(filtered_data)
-
- uploaded_file = st.file_uploader("Choose a file")
- if uploaded_file is not None:
-     st.write(uploaded_file.name)
-     bytes_data = uploaded_file.getvalue()
-     st.write(len(bytes_data), "bytes")
-
-
- st.markdown("![Kitty](./app/static/cat.jpeg)")
 
spaces/AnticPan/Clothes2Human/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Clothes2Human
- emoji: 🏃
- colorFrom: gray
- colorTo: pink
- sdk: gradio
- sdk_version: 3.44.4
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Anustup/NS_AI_LABS/src/segments.py DELETED
@@ -1,55 +0,0 @@
- from typing import Any, Dict, List
-
- import copy
-
- def merge_timestamps(timestamps: List[Dict[str, Any]], merge_window: float = 5, max_merge_size: float = 30, padding_left: float = 1, padding_right: float = 1):
-     result = []
-
-     if len(timestamps) == 0:
-         return result
-     if max_merge_size is None:
-         return timestamps
-
-     if padding_left is None:
-         padding_left = 0
-     if padding_right is None:
-         padding_right = 0
-
-     processed_time = 0
-     current_segment = None
-
-     for i in range(len(timestamps)):
-         next_segment = timestamps[i]
-
-         delta = next_segment['start'] - processed_time
-
-         # Note that segments can still be longer than the max merge size, they just won't be merged in that case
-         if current_segment is None or (merge_window is not None and delta > merge_window) \
-                 or next_segment['end'] - current_segment['start'] > max_merge_size:
-             # Finish the current segment
-             if current_segment is not None:
-                 # Add right padding
-                 finish_padding = min(padding_right, delta / 2) if delta < padding_left + padding_right else padding_right
-                 current_segment['end'] += finish_padding
-                 delta -= finish_padding
-
-                 result.append(current_segment)
-
-             # Start a new segment
-             current_segment = copy.deepcopy(next_segment)
-
-             # Pad the segment
-             current_segment['start'] = current_segment['start'] - min(padding_left, delta)
-             processed_time = current_segment['end']
-
-         else:
-             # Merge the segment
-             current_segment['end'] = next_segment['end']
-             processed_time = current_segment['end']
-
-     # Add the last segment
-     if current_segment is not None:
-         current_segment['end'] += padding_right
-         result.append(current_segment)
-
-     return result
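
For context, a small sketch of how merge_timestamps behaves, assuming the function above is importable; the sample segments are invented for illustration:

# Two nearby segments are merged and padded; the distant one starts its own padded segment.
segments = [
    {'start': 2.0, 'end': 4.0},
    {'start': 5.0, 'end': 7.0},
    {'start': 40.0, 'end': 45.0},
]
print(merge_timestamps(segments, merge_window=5, max_merge_size=30))
# [{'start': 1.0, 'end': 8.0}, {'start': 39.0, 'end': 46.0}]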
 
spaces/AquaSuisei/ChatGPTXE/modules/pdf_func.py DELETED
@@ -1,180 +0,0 @@
- from types import SimpleNamespace
- import pdfplumber
- import logging
- from llama_index import Document
-
- def prepare_table_config(crop_page):
-     """Prepare the table-detection boundaries; `page` must be the original page.
-
-     From https://github.com/jsvine/pdfplumber/issues/242
-     """
-     page = crop_page.root_page  # root/parent
-     cs = page.curves + page.edges
-     def curves_to_edges():
-         """See https://github.com/jsvine/pdfplumber/issues/127"""
-         edges = []
-         for c in cs:
-             edges += pdfplumber.utils.rect_to_edges(c)
-         return edges
-     edges = curves_to_edges()
-     return {
-         "vertical_strategy": "explicit",
-         "horizontal_strategy": "explicit",
-         "explicit_vertical_lines": edges,
-         "explicit_horizontal_lines": edges,
-         "intersection_y_tolerance": 10,
-     }
-
- def get_text_outside_table(crop_page):
-     ts = prepare_table_config(crop_page)
-     if len(ts["explicit_vertical_lines"]) == 0 or len(ts["explicit_horizontal_lines"]) == 0:
-         return crop_page
-
-     ### Get the bounding boxes of the tables on the page.
-     bboxes = [table.bbox for table in crop_page.root_page.find_tables(table_settings=ts)]
-     def not_within_bboxes(obj):
-         """Check if the object is in any of the table's bbox."""
-         def obj_in_bbox(_bbox):
-             """See https://github.com/jsvine/pdfplumber/blob/stable/pdfplumber/table.py#L404"""
-             v_mid = (obj["top"] + obj["bottom"]) / 2
-             h_mid = (obj["x0"] + obj["x1"]) / 2
-             x0, top, x1, bottom = _bbox
-             return (h_mid >= x0) and (h_mid < x1) and (v_mid >= top) and (v_mid < bottom)
-         return not any(obj_in_bbox(__bbox) for __bbox in bboxes)
-
-     return crop_page.filter(not_within_bboxes)
- # Use LaTeX for formulas: wrap inline math in $ and display math in $$
-
- extract_words = lambda page: page.extract_words(keep_blank_chars=True, y_tolerance=0, x_tolerance=1, extra_attrs=["fontname", "size", "object_type"])
- # dict_keys(['text', 'x0', 'x1', 'top', 'doctop', 'bottom', 'upright', 'direction', 'fontname', 'size'])
-
- def get_title_with_cropped_page(first_page):
-     title = []  # collect the title words
-     x0, top, x1, bottom = first_page.bbox  # page bounding box
-
-     for word in extract_words(first_page):
-         word = SimpleNamespace(**word)
-
-         if word.size >= 14:
-             title.append(word.text)
-             title_bottom = word.bottom
-         elif word.text == "Abstract":  # locate the abstract on the page
-             top = word.top
-
-     user_info = [i["text"] for i in extract_words(first_page.within_bbox((x0, title_bottom, x1, top)))]
-     # Crop away the upper part; within_bbox keeps fully included objects, crop keeps partially included ones
-     return title, user_info, first_page.within_bbox((x0, top, x1, bottom))
-
- def get_column_cropped_pages(pages, two_column=True):
-     new_pages = []
-     for page in pages:
-         if two_column:
-             left = page.within_bbox((0, 0, page.width / 2, page.height), relative=True)
-             right = page.within_bbox((page.width / 2, 0, page.width, page.height), relative=True)
-             new_pages.append(left)
-             new_pages.append(right)
-         else:
-             new_pages.append(page)
-
-     return new_pages
-
- def parse_pdf(filename, two_column=True):
-     level = logging.getLogger().level
-     if level == logging.getLevelName("DEBUG"):
-         logging.getLogger().setLevel("INFO")
-
-     with pdfplumber.open(filename) as pdf:
-         title, user_info, first_page = get_title_with_cropped_page(pdf.pages[0])
-         new_pages = get_column_cropped_pages([first_page] + pdf.pages[1:], two_column)
-
-         chapters = []
-         # tuple (chapter_name, [pageid] (start, stop), chapter_text)
-         create_chapter = lambda page_start, name_top, name_bottom: SimpleNamespace(
-             name=[],
-             name_top=name_top,
-             name_bottom=name_bottom,
-             record_chapter_name=True,
-
-             page_start=page_start,
-             page_stop=None,
-
-             text=[],
-         )
-         cur_chapter = None
-
-         # Walk through the PDF page by page
-         for idx, page in enumerate(new_pages):
-             page = get_text_outside_table(page)
-
-             # Walk through the page text line by line
-             for word in extract_words(page):
-                 word = SimpleNamespace(**word)
-
-                 # A line printed in a large font starts a new chapter
-                 if word.size >= 11:  # a chapter name appears
-                     if cur_chapter is None:
-                         cur_chapter = create_chapter(page.page_number, word.top, word.bottom)
-                     elif not cur_chapter.record_chapter_name or (word.bottom != cur_chapter.name_bottom and word.top != cur_chapter.name_top):
-                         # Compare the new line's bounds with the recorded chapter-name bounds;
-                         # stop appending to the current chapter name
-                         cur_chapter.page_stop = page.page_number  # stop id
-                         chapters.append(cur_chapter)
-                         # Reset the current chapter info
-                         cur_chapter = create_chapter(page.page_number, word.top, word.bottom)
-
-                     # print(word.size, word.top, word.bottom, word.text)
-                     cur_chapter.name.append(word.text)
-                 else:
-                     cur_chapter.record_chapter_name = False  # chapter name finished
-                     cur_chapter.text.append(word.text)
-         else:
-             # Handle the last chapter
-             cur_chapter.page_stop = page.page_number  # stop id
-             chapters.append(cur_chapter)
-
-         for i in chapters:
-             logging.info(f"section: {i.name} pages:{i.page_start, i.page_stop} word-count:{len(i.text)}")
-             logging.debug(" ".join(i.text))
-
-         title = " ".join(title)
-         user_info = " ".join(user_info)
-         text = f"Article Title: {title}, Information:{user_info}\n"
-         for idx, chapter in enumerate(chapters):
-             chapter.name = " ".join(chapter.name)
-             text += f"The {idx}th Chapter {chapter.name}: " + " ".join(chapter.text) + "\n"
-
-     logging.getLogger().setLevel(level)
-     return Document(text=text, extra_info={"title": title})
-
- BASE_POINTS = """
- 1. Who are the authors?
- 2. What is the process of the proposed method?
- 3. What is the performance of the proposed method? Please note down its performance metrics.
- 4. What are the baseline models and their performances? Please note down these baseline methods.
- 5. What dataset did this paper use?
- """
-
- READING_PROMPT = """
- You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n
- Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n
- When you are reading, You need to focus on these key points:{}
- """
-
- READING_PROMT_V2 = """
- You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n
- Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n
- When you are reading, You need to focus on these key points:{},
-
- And You need to generate a brief but informative title for this part.
- Your return format:
- - title: '...'
- - summary: '...'
- """
-
- SUMMARY_PROMPT = "You are a researcher helper bot. Now you need to read the summaries of a research paper."
-
-
- if __name__ == '__main__':
-     # Test code: parse_pdf returns a llama_index Document, not a dict
-     z = parse_pdf("./build/test.pdf")
-     print(z.extra_info["title"])
-     print(z.text)
 
spaces/Ariharasudhan/YoloV5/utils/metrics.py DELETED
@@ -1,363 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- Model validation metrics
- """
-
- import math
- import warnings
- from pathlib import Path
-
- import matplotlib.pyplot as plt
- import numpy as np
- import torch
-
- from utils import TryExcept, threaded
-
-
- def fitness(x):
-     # Model fitness as a weighted combination of metrics
-     w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
-     return (x[:, :4] * w).sum(1)
-
-
- def smooth(y, f=0.05):
-     # Box filter of fraction f
-     nf = round(len(y) * f * 2) // 2 + 1  # number of filter elements (must be odd)
-     p = np.ones(nf // 2)  # ones padding
-     yp = np.concatenate((p * y[0], y, p * y[-1]), 0)  # y padded
-     return np.convolve(yp, np.ones(nf) / nf, mode='valid')  # y-smoothed
-
-
- def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""):
-     """ Compute the average precision, given the recall and precision curves.
-     Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
-     # Arguments
-         tp:  True positives (nparray, nx1 or nx10).
-         conf:  Objectness value from 0-1 (nparray).
-         pred_cls:  Predicted object classes (nparray).
-         target_cls:  True object classes (nparray).
-         plot:  Plot precision-recall curve at mAP@0.5
-         save_dir:  Plot save directory
-     # Returns
-         The average precision as computed in py-faster-rcnn.
-     """
-
-     # Sort by objectness
-     i = np.argsort(-conf)
-     tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
-
-     # Find unique classes
-     unique_classes, nt = np.unique(target_cls, return_counts=True)
-     nc = unique_classes.shape[0]  # number of classes, number of detections
-
-     # Create Precision-Recall curve and compute AP for each class
-     px, py = np.linspace(0, 1, 1000), []  # for plotting
-     ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
-     for ci, c in enumerate(unique_classes):
-         i = pred_cls == c
-         n_l = nt[ci]  # number of labels
-         n_p = i.sum()  # number of predictions
-         if n_p == 0 or n_l == 0:
-             continue
-
-         # Accumulate FPs and TPs
-         fpc = (1 - tp[i]).cumsum(0)
-         tpc = tp[i].cumsum(0)
-
-         # Recall
-         recall = tpc / (n_l + eps)  # recall curve
-         r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases
-
-         # Precision
-         precision = tpc / (tpc + fpc)  # precision curve
-         p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score
-
-         # AP from recall-precision curve
-         for j in range(tp.shape[1]):
-             ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
-             if plot and j == 0:
-                 py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5
-
-     # Compute F1 (harmonic mean of precision and recall)
-     f1 = 2 * p * r / (p + r + eps)
-     names = [v for k, v in names.items() if k in unique_classes]  # list: only classes that have data
-     names = dict(enumerate(names))  # to dict
-     if plot:
-         plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names)
-         plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1')
-         plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision')
-         plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall')
-
-     i = smooth(f1.mean(0), 0.1).argmax()  # max F1 index
-     p, r, f1 = p[:, i], r[:, i], f1[:, i]
-     tp = (r * nt).round()  # true positives
-     fp = (tp / (p + eps) - tp).round()  # false positives
-     return tp, fp, p, r, f1, ap, unique_classes.astype(int)
-
-
- def compute_ap(recall, precision):
-     """ Compute the average precision, given the recall and precision curves
-     # Arguments
-         recall:    The recall curve (list)
-         precision: The precision curve (list)
-     # Returns
-         Average precision, precision curve, recall curve
-     """
-
-     # Append sentinel values to beginning and end
-     mrec = np.concatenate(([0.0], recall, [1.0]))
-     mpre = np.concatenate(([1.0], precision, [0.0]))
-
-     # Compute the precision envelope
-     mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
-
-     # Integrate area under curve
-     method = 'interp'  # methods: 'continuous', 'interp'
-     if method == 'interp':
-         x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
-         ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
-     else:  # 'continuous'
-         i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
-         ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve
-
-     return ap, mpre, mrec
-
-
- class ConfusionMatrix:
-     # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
-     def __init__(self, nc, conf=0.25, iou_thres=0.45):
-         self.matrix = np.zeros((nc + 1, nc + 1))
-         self.nc = nc  # number of classes
-         self.conf = conf
-         self.iou_thres = iou_thres
-
-     def process_batch(self, detections, labels):
-         """
-         Return intersection-over-union (Jaccard index) of boxes.
-         Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
-         Arguments:
-             detections (Array[N, 6]), x1, y1, x2, y2, conf, class
-             labels (Array[M, 5]), class, x1, y1, x2, y2
-         Returns:
-             None, updates confusion matrix accordingly
-         """
-         if detections is None:
-             gt_classes = labels.int()
-             for gc in gt_classes:
-                 self.matrix[self.nc, gc] += 1  # background FN
-             return
-
-         detections = detections[detections[:, 4] > self.conf]
-         gt_classes = labels[:, 0].int()
-         detection_classes = detections[:, 5].int()
-         iou = box_iou(labels[:, 1:], detections[:, :4])
-
-         x = torch.where(iou > self.iou_thres)
-         if x[0].shape[0]:
-             matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
-             if x[0].shape[0] > 1:
-                 matches = matches[matches[:, 2].argsort()[::-1]]
-                 matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
-                 matches = matches[matches[:, 2].argsort()[::-1]]
-                 matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
-         else:
-             matches = np.zeros((0, 3))
-
-         n = matches.shape[0] > 0
-         m0, m1, _ = matches.transpose().astype(int)
-         for i, gc in enumerate(gt_classes):
-             j = m0 == i
-             if n and sum(j) == 1:
-                 self.matrix[detection_classes[m1[j]], gc] += 1  # correct
-             else:
-                 self.matrix[self.nc, gc] += 1  # true background
-
-         if n:
-             for i, dc in enumerate(detection_classes):
-                 if not any(m1 == i):
-                     self.matrix[dc, self.nc] += 1  # predicted background
-
-     def matrix(self):
-         return self.matrix
-
-     def tp_fp(self):
-         tp = self.matrix.diagonal()  # true positives
-         fp = self.matrix.sum(1) - tp  # false positives
-         # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)
-         return tp[:-1], fp[:-1]  # remove background class
-
-     @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')
-     def plot(self, normalize=True, save_dir='', names=()):
-         import seaborn as sn
-
-         array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1)  # normalize columns
-         array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)
-
-         fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
-         nc, nn = self.nc, len(names)  # number of classes, names
-         sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
-         labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
-         ticklabels = (names + ['background']) if labels else "auto"
-         with warnings.catch_warnings():
-             warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
-             sn.heatmap(array,
-                        ax=ax,
-                        annot=nc < 30,
-                        annot_kws={
-                            "size": 8},
-                        cmap='Blues',
-                        fmt='.2f',
-                        square=True,
-                        vmin=0.0,
-                        xticklabels=ticklabels,
-                        yticklabels=ticklabels).set_facecolor((1, 1, 1))
-         ax.set_xlabel('True')
-         ax.set_ylabel('Predicted')
-         ax.set_title('Confusion Matrix')
-         fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
-         plt.close(fig)
-
-     def print(self):
-         for i in range(self.nc + 1):
-             print(' '.join(map(str, self.matrix[i])))
-
-
- def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
-     # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4)
-
-     # Get the coordinates of bounding boxes
-     if xywh:  # transform from xywh to xyxy
-         (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
-         w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
-         b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
-         b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
-     else:  # x1, y1, x2, y2 = box1
-         b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
-         b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
-         w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
-         w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
-
-     # Intersection area
-     inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
-             (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
-
-     # Union Area
-     union = w1 * h1 + w2 * h2 - inter + eps
-
-     # IoU
-     iou = inter / union
-     if CIoU or DIoU or GIoU:
-         cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
-         ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
-         if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
-             c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
-             rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center dist ** 2
-             if CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
-                 v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
-                 with torch.no_grad():
-                     alpha = v / (v - iou + (1 + eps))
-                 return iou - (rho2 / c2 + v * alpha)  # CIoU
-             return iou - rho2 / c2  # DIoU
-         c_area = cw * ch + eps  # convex area
-         return iou - (c_area - union) / c_area  # GIoU https://arxiv.org/pdf/1902.09630.pdf
-     return iou  # IoU
-
-
- def box_iou(box1, box2, eps=1e-7):
-     # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
-     """
-     Return intersection-over-union (Jaccard index) of boxes.
-     Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
-     Arguments:
-         box1 (Tensor[N, 4])
-         box2 (Tensor[M, 4])
-     Returns:
-         iou (Tensor[N, M]): the NxM matrix containing the pairwise
-             IoU values for every element in boxes1 and boxes2
-     """
-
-     # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
-     (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
-     inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)
-
-     # IoU = inter / (area1 + area2 - inter)
-     return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)
-
-
- def bbox_ioa(box1, box2, eps=1e-7):
-     """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2
-     box1:       np.array of shape(4)
-     box2:       np.array of shape(nx4)
-     returns:    np.array of shape(n)
-     """
-
-     # Get the coordinates of bounding boxes
-     b1_x1, b1_y1, b1_x2, b1_y2 = box1
-     b2_x1, b2_y1, b2_x2, b2_y2 = box2.T
-
-     # Intersection area
-     inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
-                  (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
-
-     # box2 area
-     box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps
-
-     # Intersection over box2 area
-     return inter_area / box2_area
-
-
- def wh_iou(wh1, wh2, eps=1e-7):
-     # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
-     wh1 = wh1[:, None]  # [N,1,2]
-     wh2 = wh2[None]  # [1,M,2]
-     inter = torch.min(wh1, wh2).prod(2)  # [N,M]
-     return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps)  # iou = inter / (area1 + area2 - inter)
-
-
- # Plots ----------------------------------------------------------------------------------------------------------------
-
-
- @threaded
- def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
-     # Precision-recall curve
-     fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
-     py = np.stack(py, axis=1)
-
-     if 0 < len(names) < 21:  # display per-class legend if < 21 classes
-         for i, y in enumerate(py.T):
-             ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}')  # plot(recall, precision)
-     else:
-         ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)
-
-     ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
-     ax.set_xlabel('Recall')
-     ax.set_ylabel('Precision')
-     ax.set_xlim(0, 1)
-     ax.set_ylim(0, 1)
-     ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
-     ax.set_title('Precision-Recall Curve')
-     fig.savefig(save_dir, dpi=250)
-     plt.close(fig)
-
-
- @threaded
- def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'):
-     # Metric-confidence curve
-     fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
-
-     if 0 < len(names) < 21:  # display per-class legend if < 21 classes
-         for i, y in enumerate(py):
-             ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
-     else:
-         ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)
-
-     y = smooth(py.mean(0), 0.05)
-     ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
-     ax.set_xlabel(xlabel)
-     ax.set_ylabel(ylabel)
-     ax.set_xlim(0, 1)
-     ax.set_ylim(0, 1)
-     ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
-     ax.set_title(f'{ylabel}-Confidence Curve')
-     fig.savefig(save_dir, dpi=250)
-     plt.close(fig)
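
As a quick reference, a hedged sketch of calling compute_ap from the file above on a hand-made recall/precision curve; the numbers are illustrative, not from any real model:

import numpy as np

recall = np.array([0.0, 0.2, 0.4, 0.6, 0.8])
precision = np.array([1.0, 0.9, 0.8, 0.6, 0.5])
ap, mpre, mrec = compute_ap(recall, precision)  # 101-point interpolated AP (COCO style)
print(f"AP: {ap:.3f}")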
 
spaces/Arvi/Performance_predictor_and_feedback_generator/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Performance Predictor And Feedback Generator
- emoji: 📚
- colorFrom: blue
- colorTo: blue
- sdk: gradio
- sdk_version: 3.16.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/run_inference_tests.sh DELETED
@@ -1,44 +0,0 @@
- #!/bin/bash -e
- # Copyright (c) Facebook, Inc. and its affiliates.
-
- BIN="python tools/train_net.py"
- OUTPUT="inference_test_output"
- NUM_GPUS=2
-
- CFG_LIST=( "${@:1}" )
-
- if [ ${#CFG_LIST[@]} -eq 0 ]; then
-     CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml )
- fi
-
- echo "========================================================================"
- echo "Configs to run:"
- echo "${CFG_LIST[@]}"
- echo "========================================================================"
-
-
- for cfg in "${CFG_LIST[@]}"; do
-     echo "========================================================================"
-     echo "Running $cfg ..."
-     echo "========================================================================"
-     $BIN \
-         --eval-only \
-         --num-gpus $NUM_GPUS \
-         --config-file "$cfg" \
-         OUTPUT_DIR $OUTPUT
-     rm -rf $OUTPUT
- done
-
-
- echo "========================================================================"
- echo "Running demo.py ..."
- echo "========================================================================"
- DEMO_BIN="python demo/demo.py"
- COCO_DIR=datasets/coco/val2014
- mkdir -pv $OUTPUT
-
- set -v
-
- $DEMO_BIN --config-file ./configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml \
-     --input $COCO_DIR/COCO_val2014_0000001933* --output $OUTPUT
- rm -rf $OUTPUT
 
spaces/Benson/text-generation/Examples/App Descargar Msica Mp3.md DELETED
@@ -1,161 +0,0 @@
-
- <h1>App Download Music MP3: How to Enjoy Free Music Offline</h1>
- <p>Do you love listening to music but hate paying for streaming services or burning through your data? If so, you may want to try an mp3 music download app. These are apps that let you download music from various sources and play it offline on your device. In this article, we explain what an mp3 music download app is, why you need one, and how to choose the best one. We also review the top 3 mp3 music download apps of 2023 and show you how to use them. Let's get started!</p>
- <h2>app download music mp3</h2><br /><p><b><b>DOWNLOAD</b> &#9881; <a href="https://bltlly.com/2v6M2K">https://bltlly.com/2v6M2K</a></b></p><br /><br />
- <h2>Introduction</h2>
- <h3>What is an mp3 music download app?</h3>
- <p>An mp3 music download app is a type of software that lets you download music files from online platforms such as YouTube, SoundCloud, Spotify, and more. The downloaded files are usually in MP3 format, a common and widely supported audio format. You can then transfer the files to your device's storage or SD card and play them offline with any music player app.</p>
- <h3>Why do you need an mp3 music download app?</h3>
- <p>There are many benefits to using an mp3 music download app, such as:</p>
- <ul>
- <li>You can save money by not paying for streaming services or buying songs individually.</li>
- <li>You can save data by not streaming music online.</li>
- <li>You can listen to music anytime, anywhere, without an internet or wifi connection.</li>
- <li>You can create your own playlists and customize your music library.</li>
- <li>You can discover new songs and artists from different genres and sources.</li>
- </ul>
- <h3>How do you choose the best mp3 music download app?</h3>
- <p>There are many mp3 music download apps on the market, but not all of them are reliable and safe. Some may contain malware, viruses, or ads that can harm your device or compromise your privacy. Others may have limited features, low quality, or slow speeds. To choose the best mp3 music download app, you should consider the following factors:</p>
- <ul>
- <li>The number and variety of sources it supports.</li>
- <li>The quality and speed of the downloads.</li>
- <li>The ease of use and the user interface.</li>
- <li>The compatibility and safety of the app.</li>
- <li>The reviews and ratings of other users.</li>
- </ul>
- <h2>Top 3 MP3 Music Download Apps in 2023</h2>
- <h3>Audiomack: Music Downloader</h3>
- <p>Audiomack is one of the most popular and reliable mp3 music download apps of 2023. It lets you stream and download the best new trending music from top artists in categories such as Hip Hop, Rap, R&B, EDM, Afropop, and Reggae. You can also listen to your local MP3 files and other files from inside the app.</p>
- <h4>Features</h4>
- <ul>
- <li>Unlimited streaming of full tracks and mixtapes that are new or trending.</li>
- <li>Download songs and full albums for offline listening, no data needed.</li>
- <li>Favorite tracks, albums, and playlists, and easily search, browse, and shuffle your collection of favorites.</li>
- <li>Listen to local music such as MP3, AAC, M4A, WAV, and other files with the local file player.</li>
- <li>Browse playlists curated by mood, genre, and much more.</li>
- <li>Create unlimited playlists.</li>
- <li>Follow your favorite artists, producers, and tastemakers.</li>
- </ul>
- <h4>Pros and cons</h4>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>Free and unlimited downloads.</td>
- <td>Some songs may not be available for download due to licensing issues.</td>
- </tr>
- <tr>
- <td>High-quality audio and fast speeds.</td>
- <td>Ads may interrupt the streaming or download process.</td>
- </tr>
- <tr>
- <td>Easy to use and navigate.</td>
- <td>Some features may require a premium subscription.</td>
- </tr>
- </table>
- <h4>How to use it</h4>
- <ol>
- <li>Download and install the app from the Google Play Store or the App Store.</li>
- <li>Open the app and sign up or log in with your email, Facebook, or Google account.</li>
- <li>Browse the home, trending, genres, or playlists sections to find the music you want to stream or download.</li>
- <li>To access your downloaded songs, go to the My Library section and tap Offline Music.</li>
- <li>To listen to your local music files, go to the My Library section and tap Local Music.</li>
- <li>To create your own playlists, go to the My Library section and tap Create Playlist. You can add songs from your offline, local, or online music.</li>
- <li>To follow your favorite artists, producers, or tastemakers, go to their profile page and tap the follow button. You can also see their latest uploads, favorites, and playlists.</li>
- </ol>
- <h3>Any Video Converter Free</h3>
- <p>Any Video Converter Free is another great mp3 music download app for 2023. It is a powerful and versatile video converter that can also extract audio from video files and save it as MP3s. You can download videos from YouTube, Facebook, Vimeo, Dailymotion, and more than 100 other sites. You can also edit videos by trimming, cropping, rotating, and adding effects, subtitles, and watermarks.</p>
- <h4>Features</h4>
- <ul>
- <li>Convert any video format to MP4, AVI, MKV, WMV, MOV, FLV, 3GP, WebM, and more.</li>
- <li>Extract audio from video files and save it as high-quality MP3s.</li>
- <li>Download online videos from YouTube and other popular sites with one click.</li>
- <li>Edit videos with various tools such as trimming, cropping, rotating, and adding effects, subtitles, and watermarks.</li>
- <li>Burn videos to DVD or Blu-ray discs with custom menus and templates.</li>
- <li>Supports batch conversion and multithreading for faster speed and efficiency.</li>
- <li>Supports multiple languages and platforms, including Windows and Mac OS X.</li>
- </ul>
- <h4>Pros and cons</h4>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>Free and unlimited downloads and conversions.</td>
- <td>Some advanced features may require a paid upgrade.</td>
- </tr>
- <tr>
- <td>High-quality audio and video output.</td>
-
- </tr>
- <tr>
- <td>Easy to use and customize.</td>
- <td>Some video formats may not be supported or compatible with some devices.</td>
- </tr>
- </table>
- <h4>How to use it</h4>
- <ol>
- <li>Download and install the app from the official website or the Microsoft Store.</li>
- <li>Open the app and click the Add Video(s) button to import the video files you want to convert or extract audio from.</li>
- <li>Select the output format from the drop-down list on the right. To save as MP3, choose Audio Files > MP3 Audio.</li>
- <li>To download online videos, click the Download Video button and paste the video URL. You can also choose the output format and quality.</li>
- <li>To edit videos, click the Edit button and use the tools to trim, crop, rotate, and add effects, subtitles, and watermarks.</li>
- <li>To burn videos to DVD or Blu-ray discs, click the Burn DVD button and select the options and templates.</li>
- <li>Click the Convert Now button to start the conversion or extraction. You can also tick the option to shut down the computer when the conversion completes.</li>
- <li>To access your converted or downloaded files, click the Output Folder button or go to the folder you specified in the settings.</li>
- </ol>
- <h3>Music Downloader</h3>
- <p>Music Downloader is another mp3 music download app for 2023. It is a simple and fast app that lets you download free music from various genres and artists. You can also play music online or offline with its built-in music player, and manage your music files with its file manager.</p>
- <h4>Features</h4>
- <ul>
- <li>Download free music from various genres and artists.</li>
- <li>Play music online or offline with its built-in music player.</li>
- <li>Manage your music files with its file manager.</li>
- <li>Share your music with your friends via social media or email.</li>
- <li>Supports multiple languages and platforms, including Android and iOS.</li>
- </ul>
- <h4>Pros and cons</h4>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>Free and unlimited downloads and playback.</td>
- <td>Some music may not be licensed or legal to download.</td>
- </tr>
- <tr>
- <td>Fast and easy to use.</td>
- <td>Some music may have low-quality or incorrect tags.</td>
- </tr>
- <tr>
- <td>Simple, clean interface.</td>
- <td>No advanced features or customization options.</td>
- </tr>
- </table>
- <h4>How to use it</h4>
- <ol>
- <li>Download and install the app from the Google Play Store or the App Store.</li>
- <li>Open the app and browse music by genre or artist, or search by keyword.</li>
- <li>To download a song, tap the download icon next to the song title. You can also preview the song by tapping the play icon.</li>
- <li>To access your downloaded songs, go to the Downloaded section and tap the song you want to play. You can also delete, rename, or share the song from there.</li>
- <li>To play music online, go to the Online section and tap the song you want to stream. You can also add it to your favorites or playlists from there.</li>
- <li>To manage your music files, go to the File Manager section and tap the folder you want to open. You can also create new folders and move, copy, or delete files from there.</li>
- <li>To share your music with your friends, go to the Share section and select the songs you want to share. You can then choose the sharing method, such as social media or email.</li>
- </ol>
- <h2>Conclusion</h2>
-
- <h2>Frequently asked questions</h2>
- <ol>
- <li><b>What is an mp3 music download app?</b></li>
- <p>An mp3 music download app is a type of software that lets you download music files from online platforms such as YouTube, SoundCloud, Spotify, and more. The downloaded files are usually in MP3 format, a common and widely supported audio format. You can then transfer the files to your device's storage or SD card and play them offline with any music player app.</p>
- <li><b>Why do I need an mp3 music download app?</b></li>
- <p>You need an mp3 music download app because it can help you enjoy free music offline on your device. It can save you money by not paying for streaming services or buying songs individually. It can save you data by not streaming music online. It can also let you listen to music anytime, anywhere, without an internet or wifi connection. You can also create your own playlists and customize your music library, and discover new songs and artists from different genres and sources.</p>
- <li><b>How do I choose the best mp3 music download app?</b></li>
- <p>To choose the best mp3 music download app, you should consider the following factors: the number and variety of sources it supports, the quality and speed of the downloads, the ease of use and the user interface, the compatibility and safety of the app, and the reviews and ratings of other users. You should also compare the features, pros, and cons of different apps and try them out before making a final decision.</p>
- <li><b>What are the top 3 mp3 music download apps in 2023?</b></li>
- <p>The top 3 mp3 music download apps in 2023 are Audiomack, Any Video Converter Free, and Music Downloader. These apps have been reviewed and rated highly by many users and experts. They offer a wide range of features, sources, quality, and speed for downloading and playing music offline. They are also easy to use, compatible, and safe.</p>
- <li><b>How do I use an mp3 music download app?</b></li> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/BernardoOlisan/vqganclip/CLIP/clip/__init__.py DELETED
@@ -1 +0,0 @@
- from .clip import *
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/regexopt.py DELETED
@@ -1,91 +0,0 @@
- """
-     pygments.regexopt
-     ~~~~~~~~~~~~~~~~~
-
-     An algorithm that generates optimized regexes for matching long lists of
-     literal strings.
-
-     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
-     :license: BSD, see LICENSE for details.
- """
-
- import re
- from re import escape
- from os.path import commonprefix
- from itertools import groupby
- from operator import itemgetter
-
- CS_ESCAPE = re.compile(r'[\[\^\\\-\]]')
- FIRST_ELEMENT = itemgetter(0)
-
-
- def make_charset(letters):
-     return '[' + CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters)) + ']'
-
-
- def regex_opt_inner(strings, open_paren):
-     """Return a regex that matches any string in the sorted list of strings."""
-     close_paren = open_paren and ')' or ''
-     # print strings, repr(open_paren)
-     if not strings:
-         # print '-> nothing left'
-         return ''
-     first = strings[0]
-     if len(strings) == 1:
-         # print '-> only 1 string'
-         return open_paren + escape(first) + close_paren
-     if not first:
-         # print '-> first string empty'
-         return open_paren + regex_opt_inner(strings[1:], '(?:') \
-             + '?' + close_paren
-     if len(first) == 1:
-         # multiple one-char strings? make a charset
-         oneletter = []
-         rest = []
-         for s in strings:
-             if len(s) == 1:
-                 oneletter.append(s)
-             else:
-                 rest.append(s)
-         if len(oneletter) > 1:  # do we have more than one oneletter string?
-             if rest:
-                 # print '-> 1-character + rest'
-                 return open_paren + regex_opt_inner(rest, '') + '|' \
-                     + make_charset(oneletter) + close_paren
-             # print '-> only 1-character'
-             return open_paren + make_charset(oneletter) + close_paren
-     prefix = commonprefix(strings)
-     if prefix:
-         plen = len(prefix)
-         # we have a prefix for all strings
-         # print '-> prefix:', prefix
-         return open_paren + escape(prefix) \
-             + regex_opt_inner([s[plen:] for s in strings], '(?:') \
-             + close_paren
-     # is there a suffix?
-     strings_rev = [s[::-1] for s in strings]
-     suffix = commonprefix(strings_rev)
-     if suffix:
-         slen = len(suffix)
-         # print '-> suffix:', suffix[::-1]
-         return open_paren \
-             + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
-             + escape(suffix[::-1]) + close_paren
-     # recurse on common 1-string prefixes
-     # print '-> last resort'
-     return open_paren + \
-         '|'.join(regex_opt_inner(list(group[1]), '')
-                  for group in groupby(strings, lambda s: s[0] == first[0])) \
-         + close_paren
-
-
- def regex_opt(strings, prefix='', suffix=''):
-     """Return a regex string that matches any string in the given list.
-
-     The strings to match must be literal strings, not regexes. They will be
-     regex-escaped.
-
-     *prefix* and *suffix* are pre- and appended to the final regex.
-     """
-     strings = sorted(strings)
-     return prefix + regex_opt_inner(strings, '(') + suffix
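
For reference, a minimal sketch of how regex_opt is typically used; the keyword list is an arbitrary example, and the vendored import path is shown only for illustration:

import re
from pip._vendor.pygments.regexopt import regex_opt

# One optimized alternation that matches any of several literal keywords.
pattern = regex_opt(['if', 'elif', 'else', 'import', 'in', 'is'], prefix=r'\b', suffix=r'\b')
rx = re.compile(pattern)
print(bool(rx.search('elif x in y:')))  # True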
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/filepost.py DELETED
@@ -1,98 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- import binascii
4
- import codecs
5
- import os
6
- from io import BytesIO
7
-
8
- from .fields import RequestField
9
- from .packages import six
10
- from .packages.six import b
11
-
12
- writer = codecs.lookup("utf-8")[3]
13
-
14
-
15
- def choose_boundary():
16
- """
17
- Our embarrassingly-simple replacement for mimetools.choose_boundary.
18
- """
19
- boundary = binascii.hexlify(os.urandom(16))
20
- if not six.PY2:
21
- boundary = boundary.decode("ascii")
22
- return boundary
23
-
24
-
25
- def iter_field_objects(fields):
26
- """
27
- Iterate over fields.
28
-
29
- Supports list of (k, v) tuples and dicts, and lists of
30
- :class:`~urllib3.fields.RequestField`.
31
-
32
- """
33
- if isinstance(fields, dict):
34
- i = six.iteritems(fields)
35
- else:
36
- i = iter(fields)
37
-
38
- for field in i:
39
- if isinstance(field, RequestField):
40
- yield field
41
- else:
42
- yield RequestField.from_tuples(*field)
43
-
44
-
45
- def iter_fields(fields):
46
- """
47
- .. deprecated:: 1.6
48
-
49
- Iterate over fields.
50
-
51
- The addition of :class:`~urllib3.fields.RequestField` makes this function
52
- obsolete. Instead, use :func:`iter_field_objects`, which returns
53
- :class:`~urllib3.fields.RequestField` objects.
54
-
55
- Supports list of (k, v) tuples and dicts.
56
- """
57
- if isinstance(fields, dict):
58
- return ((k, v) for k, v in six.iteritems(fields))
59
-
60
- return ((k, v) for k, v in fields)
61
-
62
-
63
- def encode_multipart_formdata(fields, boundary=None):
64
- """
65
- Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
66
-
67
- :param fields:
68
- Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
69
-
70
- :param boundary:
71
- If not specified, then a random boundary will be generated using
72
- :func:`urllib3.filepost.choose_boundary`.
73
- """
74
- body = BytesIO()
75
- if boundary is None:
76
- boundary = choose_boundary()
77
-
78
- for field in iter_field_objects(fields):
79
- body.write(b("--%s\r\n" % (boundary)))
80
-
81
- writer(body).write(field.render_headers())
82
- data = field.data
83
-
84
- if isinstance(data, int):
85
- data = str(data) # Backwards compatibility
86
-
87
- if isinstance(data, six.text_type):
88
- writer(body).write(data)
89
- else:
90
- body.write(data)
91
-
92
- body.write(b"\r\n")
93
-
94
- body.write(b("--%s--\r\n" % (boundary)))
95
-
96
- content_type = str("multipart/form-data; boundary=%s" % boundary)
97
-
98
- return body.getvalue(), content_type
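For reference, the deleted module's entry point can be driven like this. A minimal sketch assuming urllib3 1.x (the version vendored here) is importable; the field names and payload are illustrative.

```python
from urllib3.filepost import encode_multipart_formdata, choose_boundary

fields = {
    "name": "example",
    # A (filename, data, content_type) tuple becomes a file part.
    "file": ("report.txt", b"hello world", "text/plain"),
}

body, content_type = encode_multipart_formdata(fields, boundary=choose_boundary())
print(content_type)            # multipart/form-data; boundary=<32 hex chars>
print(body.split(b"\r\n")[0])  # the opening --<boundary> line
```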
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/tomli/_re.py DELETED
@@ -1,107 +0,0 @@
1
- # SPDX-License-Identifier: MIT
2
- # SPDX-FileCopyrightText: 2021 Taneli Hukkinen
3
- # Licensed to PSF under a Contributor Agreement.
4
-
5
- from __future__ import annotations
6
-
7
- from datetime import date, datetime, time, timedelta, timezone, tzinfo
8
- from functools import lru_cache
9
- import re
10
- from typing import Any
11
-
12
- from ._types import ParseFloat
13
-
14
- # E.g.
15
- # - 00:32:00.999999
16
- # - 00:32:00
17
- _TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
18
-
19
- RE_NUMBER = re.compile(
20
- r"""
21
- 0
22
- (?:
23
- x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
24
- |
25
- b[01](?:_?[01])* # bin
26
- |
27
- o[0-7](?:_?[0-7])* # oct
28
- )
29
- |
30
- [+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
31
- (?P<floatpart>
32
- (?:\.[0-9](?:_?[0-9])*)? # optional fractional part
33
- (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
34
- )
35
- """,
36
- flags=re.VERBOSE,
37
- )
38
- RE_LOCALTIME = re.compile(_TIME_RE_STR)
39
- RE_DATETIME = re.compile(
40
- rf"""
41
- ([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
42
- (?:
43
- [Tt ]
44
- {_TIME_RE_STR}
45
- (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
46
- )?
47
- """,
48
- flags=re.VERBOSE,
49
- )
50
-
51
-
52
- def match_to_datetime(match: re.Match) -> datetime | date:
53
- """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
54
-
55
- Raises ValueError if the match does not correspond to a valid date
56
- or datetime.
57
- """
58
- (
59
- year_str,
60
- month_str,
61
- day_str,
62
- hour_str,
63
- minute_str,
64
- sec_str,
65
- micros_str,
66
- zulu_time,
67
- offset_sign_str,
68
- offset_hour_str,
69
- offset_minute_str,
70
- ) = match.groups()
71
- year, month, day = int(year_str), int(month_str), int(day_str)
72
- if hour_str is None:
73
- return date(year, month, day)
74
- hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
75
- micros = int(micros_str.ljust(6, "0")) if micros_str else 0
76
- if offset_sign_str:
77
- tz: tzinfo | None = cached_tz(
78
- offset_hour_str, offset_minute_str, offset_sign_str
79
- )
80
- elif zulu_time:
81
- tz = timezone.utc
82
- else: # local date-time
83
- tz = None
84
- return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
85
-
86
-
87
- @lru_cache(maxsize=None)
88
- def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
89
- sign = 1 if sign_str == "+" else -1
90
- return timezone(
91
- timedelta(
92
- hours=sign * int(hour_str),
93
- minutes=sign * int(minute_str),
94
- )
95
- )
96
-
97
-
98
- def match_to_localtime(match: re.Match) -> time:
99
- hour_str, minute_str, sec_str, micros_str = match.groups()
100
- micros = int(micros_str.ljust(6, "0")) if micros_str else 0
101
- return time(int(hour_str), int(minute_str), int(sec_str), micros)
102
-
103
-
104
- def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
105
- if match.group("floatpart"):
106
- return parse_float(match.group())
107
- return int(match.group(), 0)
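For reference, the regex/converter pairs above can be exercised directly. A minimal sketch assuming the module is importable as `tomli._re` (its location inside an installed tomli; it is a private module, so the exact names may differ between versions).

```python
from tomli._re import (RE_DATETIME, RE_LOCALTIME,
                       match_to_datetime, match_to_localtime)

m = RE_DATETIME.fullmatch("1988-10-27T20:15:00-05:00")
print(match_to_datetime(m))   # 1988-10-27 20:15:00-05:00

m = RE_LOCALTIME.fullmatch("00:32:00.999999")
print(match_to_localtime(m))  # 00:32:00.999999
```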
spaces/CVPR/LIVE/thrust/testing/unittest/util.h DELETED
@@ -1,67 +0,0 @@
1
- #pragma once
2
-
3
- #include <iostream>
4
- #include <string>
5
- #include <typeinfo>
6
- #include <unittest/system.h>
7
-
8
- #include <thrust/extrema.h>
9
- #include <thrust/limits.h>
10
- #include <thrust/detail/type_traits.h>
11
-
12
- namespace unittest
13
- {
14
-
15
- template<typename T>
16
- std::string type_name(void)
17
- {
18
- return demangle(typeid(T).name());
19
- } // end type_name()
20
-
21
- // Use this with counting_iterator to avoid generating a range larger than we
22
- // can represent.
23
- template <typename T>
24
- typename thrust::detail::disable_if<
25
- thrust::detail::is_floating_point<T>::value
26
- , T
27
- >::type truncate_to_max_representable(std::size_t n)
28
- {
29
- return thrust::min<std::size_t>(
30
- n, static_cast<std::size_t>(thrust::numeric_limits<T>::max())
31
- );
32
- }
33
-
34
- // TODO: This probably won't work for `half`.
35
- template <typename T>
36
- typename thrust::detail::enable_if<
37
- thrust::detail::is_floating_point<T>::value
38
- , T
39
- >::type truncate_to_max_representable(std::size_t n)
40
- {
41
- return thrust::min<T>(
42
- n, thrust::numeric_limits<T>::max()
43
- );
44
- }
45
-
46
- } // end unittest
47
-
48
- template <typename Iterator>
49
- void PRINT(Iterator first, Iterator last)
50
- {
51
- size_t n = 0;
52
- for (Iterator i = first; i != last; i++, n++)
53
- std::cout << ">>> [" << n << "] = " << *i << std::endl;
54
- }
55
-
56
- template <typename Container>
57
- void PRINT(const Container& c)
58
- {
59
- PRINT(c.begin(), c.end());
60
- }
61
-
62
- template <size_t N>
63
- void PRINT(const char (&c)[N])
64
- {
65
- std::cout << std::string(c, c + N) << std::endl;
66
- }
67
-
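The clamping idea in `truncate_to_max_representable` is language-neutral. A Python sketch of the same logic, with a hypothetical constant standing in for `thrust::numeric_limits<T>::max()`:

```python
def truncate_to_max_representable(n: int, type_max: int) -> int:
    # Clamp a requested element count to the largest value the element
    # type can represent, as the helper above does for counting_iterator.
    return min(n, type_max)

INT8_MAX = 2**7 - 1   # stand-in for thrust::numeric_limits<signed char>::max()
print(truncate_to_max_representable(1_000_000, INT8_MAX))  # 127
```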
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/core/triple_chevron_launch.h DELETED
@@ -1,976 +0,0 @@
1
- /******************************************************************************
2
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
- *
4
- * Redistribution and use in source and binary forms, with or without
5
- * modification, are permitted provided that the following conditions are met:
6
- * * Redistributions of source code must retain the above copyright
7
- * notice, this list of conditions and the following disclaimer.
8
- * * Redistributions in binary form must reproduce the above copyright
9
- * notice, this list of conditions and the following disclaimer in the
10
- * documentation and/or other materials provided with the distribution.
11
- * * Neither the name of the NVIDIA CORPORATION nor the
12
- * names of its contributors may be used to endorse or promote products
13
- * derived from this software without specific prior written permission.
14
- *
15
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
- *
26
- ******************************************************************************/
27
- #pragma once
28
-
29
- #include <thrust/detail/config.h>
30
- #include <thrust/system/cuda/detail/core/alignment.h>
31
- #include <thrust/system/cuda/detail/guarded_cuda_runtime_api.h>
32
- #include <cassert>
33
-
34
-
35
- namespace thrust
36
- {
37
-
38
- namespace cuda_cub {
39
- namespace launcher {
40
-
41
- struct triple_chevron
42
- {
43
- typedef size_t Size;
44
- dim3 const grid;
45
- dim3 const block;
46
- Size const shared_mem;
47
- cudaStream_t const stream;
48
-
49
- THRUST_RUNTIME_FUNCTION
50
- triple_chevron(dim3 grid_,
51
- dim3 block_,
52
- Size shared_mem_ = 0,
53
- cudaStream_t stream_ = 0)
54
- : grid(grid_),
55
- block(block_),
56
- shared_mem(shared_mem_),
57
- stream(stream_) {}
58
-
59
- #if 0
60
- template<class K, class... Args>
61
- cudaError_t __host__
62
- doit_host(K k, Args const&... args) const
63
- {
64
- k<<<grid, block, shared_mem, stream>>>(args...);
65
- return cudaPeekAtLastError();
66
- }
67
- #else
68
- template <class K, class _0>
69
- cudaError_t __host__
70
- doit_host(K k, _0 x0) const
71
- {
72
- k<<<grid, block, shared_mem, stream>>>(x0);
73
- return cudaPeekAtLastError();
74
- }
75
- template <class K, class _0, class _1>
76
- cudaError_t __host__
77
- doit_host(K k, _0 x0, _1 x1) const
78
- {
79
- k<<<grid, block, shared_mem, stream>>>(x0,x1);
80
- return cudaPeekAtLastError();
81
- }
82
- template <class K, class _0, class _1, class _2>
83
- cudaError_t __host__
84
- doit_host(K k, _0 x0, _1 x1, _2 x2) const
85
- {
86
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2);
87
- return cudaPeekAtLastError();
88
- }
89
- template <class K, class _0, class _1, class _2, class _3>
90
- cudaError_t __host__
91
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3) const
92
- {
93
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3);
94
- return cudaPeekAtLastError();
95
- }
96
- template <class K, class _0, class _1, class _2, class _3, class _4>
97
- cudaError_t __host__
98
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const
99
- {
100
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4);
101
- return cudaPeekAtLastError();
102
- }
103
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5>
104
- cudaError_t __host__
105
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const
106
- {
107
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5);
108
- return cudaPeekAtLastError();
109
- }
110
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6>
111
- cudaError_t __host__
112
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const
113
- {
114
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5,x6);
115
- return cudaPeekAtLastError();
116
- }
117
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
118
- cudaError_t __host__
119
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const
120
- {
121
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5,x6,x7);
122
- return cudaPeekAtLastError();
123
- }
124
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
125
- cudaError_t __host__
126
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const
127
- {
128
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5,x6,x7,x8);
129
- return cudaPeekAtLastError();
130
- }
131
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
132
- cudaError_t __host__
133
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const
134
- {
135
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5,x6,x7,x8,x9);
136
- return cudaPeekAtLastError();
137
- }
138
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
139
- cudaError_t __host__
140
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const
141
- {
142
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA);
143
- return cudaPeekAtLastError();
144
- }
145
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
146
- cudaError_t __host__
147
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const
148
- {
149
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB);
150
- return cudaPeekAtLastError();
151
- }
152
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
153
- cudaError_t __host__
154
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const
155
- {
156
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC);
157
- return cudaPeekAtLastError();
158
- }
159
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
160
- cudaError_t __host__
161
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) const
162
- {
163
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC,xD);
164
- return cudaPeekAtLastError();
165
- }
166
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
167
- cudaError_t __host__
168
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) const
169
- {
170
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC,xD,xE);
171
- return cudaPeekAtLastError();
172
- }
173
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE, class _xF>
174
- cudaError_t __host__
175
- doit_host(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE, _xF xF) const
176
- {
177
- k<<<grid, block, shared_mem, stream>>>(x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC,xD,xE,xF);
178
- return cudaPeekAtLastError();
179
- }
180
- #endif
181
-
182
- template<class T>
183
- size_t __device__
184
- align_up(size_t offset) const
185
- {
186
- size_t alignment = alignment_of<T>::value;
187
- return alignment * ((offset + (alignment - 1))/ alignment);
188
- }
189
-
190
- #if 0
191
- size_t __device__ argument_pack_size(size_t size) const { return size; }
192
- template <class Arg, class... Args>
193
- size_t __device__
194
- argument_pack_size(size_t size, Arg const& arg, Args const&... args) const
195
- {
196
- size = align_up<Arg>(size);
197
- return argument_pack_size(size + sizeof(Arg), args...);
198
- }
199
- #else
200
- template <class Arg>
201
- size_t __device__
202
- argument_pack_size(size_t size, Arg) const
203
- {
204
- return align_up<Arg>(size) + sizeof(Arg);
205
- }
206
- template <class Arg, class _0>
207
- size_t __device__
208
- argument_pack_size(size_t size, Arg, _0 x0) const
209
- {
210
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0);
211
- }
212
- template <class Arg, class _0, class _1>
213
- size_t __device__
214
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1) const
215
- {
216
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1);
217
- }
218
- template <class Arg, class _0, class _1, class _2>
219
- size_t __device__
220
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2) const
221
- {
222
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2);
223
- }
224
- template <class Arg, class _0, class _1, class _2, class _3>
225
- size_t __device__
226
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3) const
227
- {
228
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3);
229
- }
230
- template <class Arg, class _0, class _1, class _2, class _3, class _4>
231
- size_t __device__
232
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const
233
- {
234
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4);
235
- }
236
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5>
237
- size_t __device__
238
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const
239
- {
240
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5);
241
- }
242
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6>
243
- size_t __device__
244
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const
245
- {
246
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5, x6);
247
- }
248
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
249
- size_t __device__
250
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const
251
- {
252
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5, x6, x7);
253
- }
254
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
255
- size_t __device__
256
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const
257
- {
258
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5, x6, x7, x8);
259
- }
260
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
261
- size_t __device__
262
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const
263
- {
264
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9);
265
- }
266
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
267
- size_t __device__
268
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const
269
- {
270
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA);
271
- }
272
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
273
- size_t __device__
274
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const
275
- {
276
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB);
277
- }
278
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
279
- size_t __device__
280
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const
281
- {
282
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC);
283
- }
284
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
285
- size_t __device__
286
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC,_xD xD) const
287
- {
288
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD);
289
- }
290
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
291
- size_t __device__
292
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC,_xD xD, _xE xE) const
293
- {
294
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE);
295
- }
296
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE, class _xF>
297
- size_t __device__
298
- argument_pack_size(size_t size, Arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC,_xD xD, _xE xE, _xF xF) const
299
- {
300
- return argument_pack_size(align_up<Arg>(size) + sizeof(Arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE, xF);
301
- }
302
- #endif /* variadic */
303
-
304
- template <class Arg>
305
- size_t __device__ copy_arg(char* buffer, size_t offset, Arg arg) const
306
- {
307
- offset = align_up<Arg>(offset);
308
- for (int i = 0; i != sizeof(Arg); ++i)
309
- buffer[offset+i] = *((char*)&arg + i);
310
- return offset + sizeof(Arg);
311
- }
312
-
313
- #if 0
314
- void __device__ fill_arguments(char*, size_t) const {}
315
- template<class Arg, class... Args>
316
- void __device__
317
- fill_arguments(char* buffer, size_t offset, Arg const& arg, Args const& ... args) const
318
- {
319
- fill_arguments(buffer, copy_arg(buffer, offset, arg), args...);
320
- }
321
- #else
322
- template<class Arg>
323
- void __device__
324
- fill_arguments(char* buffer, size_t offset, Arg arg) const
325
- {
326
- copy_arg(buffer, offset, arg);
327
- }
328
- template<class Arg, class _0>
329
- void __device__
330
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0) const
331
- {
332
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0);
333
- }
334
- template <class Arg, class _0, class _1>
335
- void __device__
336
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1) const
337
- {
338
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1);
339
- }
340
- template <class Arg, class _0, class _1, class _2>
341
- void __device__
342
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2) const
343
- {
344
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2);
345
- }
346
- template <class Arg, class _0, class _1, class _2, class _3>
347
- void __device__
348
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3) const
349
- {
350
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3);
351
- }
352
- template <class Arg, class _0, class _1, class _2, class _3, class _4>
353
- void __device__
354
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const
355
- {
356
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4);
357
- }
358
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5>
359
- void __device__
360
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const
361
- {
362
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5);
363
- }
364
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6>
365
- void __device__
366
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const
367
- {
368
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5, x6);
369
- }
370
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
371
- void __device__
372
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const
373
- {
374
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5, x6, x7);
375
- }
376
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
377
- void __device__
378
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const
379
- {
380
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5, x6, x7, x8);
381
- }
382
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
383
- void __device__
384
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const
385
- {
386
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9);
387
- }
388
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
389
- void __device__
390
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const
391
- {
392
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA);
393
- }
394
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
395
- void __device__
396
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const
397
- {
398
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB);
399
- }
400
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
401
- void __device__
402
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const
403
- {
404
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC);
405
- }
406
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
407
- void __device__
408
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC,_xD xD) const
409
- {
410
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD);
411
- }
412
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
413
- void __device__
414
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC,_xD xD, _xE xE) const
415
- {
416
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE);
417
- }
418
- template <class Arg, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE, class _xF>
419
- void __device__
420
- fill_arguments(char* buffer, size_t offset, Arg arg, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC,_xD xD, _xE xE, _xF xF) const
421
- {
422
- fill_arguments(buffer, copy_arg(buffer, offset, arg), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE, xF);
423
- }
424
- #endif /* variadic */
425
-
426
- #if 0
427
- template<class K, class... Args>
428
- cudaError_t __device__
429
- doit_device(K k, Args const&... args) const
430
- {
431
- cudaError_t status = cudaErrorNotSupported;
432
- #if __THRUST_HAS_CUDART__
433
- const size_t size = argument_pack_size(0,args...);
434
- void *param_buffer = cudaGetParameterBuffer(64,size);
435
- fill_arguments((char*)param_buffer, 0, args...);
436
- status = launch_device(k, param_buffer);
437
- #endif
438
- return status;
439
- }
440
- #else
441
- template<class K, class _0>
442
- cudaError_t __device__
443
- doit_device(K k, _0 x0) const
444
- {
445
- cudaError_t status = cudaErrorNotSupported;
446
- #if __THRUST_HAS_CUDART__
447
- const size_t size = argument_pack_size(0,x0);
448
- void *param_buffer = cudaGetParameterBuffer(64,size);
449
- fill_arguments((char*)param_buffer, 0, x0);
450
- status = launch_device(k, param_buffer);
451
- #else
452
- THRUST_UNUSED_VAR(k);
453
- THRUST_UNUSED_VAR(x0);
454
- #endif
455
- return status;
456
- }
457
- template <class K, class _0, class _1>
458
- cudaError_t __device__
459
- doit_device(K k, _0 x0, _1 x1) const
460
- {
461
- cudaError_t status = cudaErrorNotSupported;
462
- #if __THRUST_HAS_CUDART__
463
- const size_t size = argument_pack_size(0,x0,x1);
464
- void *param_buffer = cudaGetParameterBuffer(64,size);
465
- fill_arguments((char*)param_buffer, 0, x0,x1);
466
- status = launch_device(k, param_buffer);
467
- #else
468
- THRUST_UNUSED_VAR(k);
469
- THRUST_UNUSED_VAR(x0);
470
- THRUST_UNUSED_VAR(x1);
471
- #endif
472
- return status;
473
- }
474
- template <class K, class _0, class _1, class _2>
475
- cudaError_t __device__
476
- doit_device(K k, _0 x0, _1 x1, _2 x2) const
477
- {
478
- cudaError_t status = cudaErrorNotSupported;
479
- #if __THRUST_HAS_CUDART__
480
- const size_t size = argument_pack_size(0,x0,x1,x2);
481
- void *param_buffer = cudaGetParameterBuffer(64,size);
482
- fill_arguments((char*)param_buffer, 0, x0,x1,x2);
483
- status = launch_device(k, param_buffer);
484
- #else
485
- THRUST_UNUSED_VAR(k);
486
- THRUST_UNUSED_VAR(x0);
487
- THRUST_UNUSED_VAR(x1);
488
- THRUST_UNUSED_VAR(x2);
489
- #endif
490
- return status;
491
- }
492
- template <class K, class _0, class _1, class _2, class _3>
493
- cudaError_t __device__
494
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3) const
495
- {
496
- cudaError_t status = cudaErrorNotSupported;
497
- #if __THRUST_HAS_CUDART__
498
- const size_t size = argument_pack_size(0,x0,x1,x2,x3);
499
- void *param_buffer = cudaGetParameterBuffer(64,size);
500
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3);
501
- status = launch_device(k, param_buffer);
502
- #else
503
- THRUST_UNUSED_VAR(k);
504
- THRUST_UNUSED_VAR(x0);
505
- THRUST_UNUSED_VAR(x1);
506
- THRUST_UNUSED_VAR(x2);
507
- THRUST_UNUSED_VAR(x3);
508
- #endif
509
- return status;
510
- }
511
- template <class K, class _0, class _1, class _2, class _3, class _4>
512
- cudaError_t __device__
513
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const
514
- {
515
- cudaError_t status = cudaErrorNotSupported;
516
- #if __THRUST_HAS_CUDART__
517
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4);
518
- void *param_buffer = cudaGetParameterBuffer(64,size);
519
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4);
520
- status = launch_device(k, param_buffer);
521
- #else
522
- THRUST_UNUSED_VAR(k);
523
- THRUST_UNUSED_VAR(x0);
524
- THRUST_UNUSED_VAR(x1);
525
- THRUST_UNUSED_VAR(x2);
526
- THRUST_UNUSED_VAR(x3);
527
- THRUST_UNUSED_VAR(x4);
528
- #endif
529
- return status;
530
- }
531
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5>
532
- cudaError_t __device__
533
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const
534
- {
535
- cudaError_t status = cudaErrorNotSupported;
536
- #if __THRUST_HAS_CUDART__
537
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5);
538
- void *param_buffer = cudaGetParameterBuffer(64,size);
539
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5);
540
- status = launch_device(k, param_buffer);
541
- #else
542
- THRUST_UNUSED_VAR(k);
543
- THRUST_UNUSED_VAR(x0);
544
- THRUST_UNUSED_VAR(x1);
545
- THRUST_UNUSED_VAR(x2);
546
- THRUST_UNUSED_VAR(x3);
547
- THRUST_UNUSED_VAR(x4);
548
- THRUST_UNUSED_VAR(x5);
549
- #endif
550
- return status;
551
- }
552
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6>
553
- cudaError_t __device__
554
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const
555
- {
556
- cudaError_t status = cudaErrorNotSupported;
557
- #if __THRUST_HAS_CUDART__
558
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5,x6);
559
- void *param_buffer = cudaGetParameterBuffer(64,size);
560
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5,x6);
561
- status = launch_device(k, param_buffer);
562
- #else
563
- THRUST_UNUSED_VAR(k);
564
- THRUST_UNUSED_VAR(x0);
565
- THRUST_UNUSED_VAR(x1);
566
- THRUST_UNUSED_VAR(x2);
567
- THRUST_UNUSED_VAR(x3);
568
- THRUST_UNUSED_VAR(x4);
569
- THRUST_UNUSED_VAR(x5);
570
- THRUST_UNUSED_VAR(x6);
571
- #endif
572
- return status;
573
- }
574
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
575
- cudaError_t __device__
576
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const
577
- {
578
- cudaError_t status = cudaErrorNotSupported;
579
- #if __THRUST_HAS_CUDART__
580
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5,x6,x7);
581
- void *param_buffer = cudaGetParameterBuffer(64,size);
582
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5,x6,x7);
583
- status = launch_device(k, param_buffer);
584
- #else
585
- THRUST_UNUSED_VAR(k);
586
- THRUST_UNUSED_VAR(x0);
587
- THRUST_UNUSED_VAR(x1);
588
- THRUST_UNUSED_VAR(x2);
589
- THRUST_UNUSED_VAR(x3);
590
- THRUST_UNUSED_VAR(x4);
591
- THRUST_UNUSED_VAR(x5);
592
- THRUST_UNUSED_VAR(x6);
593
- THRUST_UNUSED_VAR(x7);
594
- #endif
595
- return status;
596
- }
597
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
598
- cudaError_t __device__
599
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const
600
- {
601
- cudaError_t status = cudaErrorNotSupported;
602
- #if __THRUST_HAS_CUDART__
603
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5,x6,x7,x8);
604
- void *param_buffer = cudaGetParameterBuffer(64,size);
605
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5,x6,x7,x8);
606
- status = launch_device(k, param_buffer);
607
- #else
608
- THRUST_UNUSED_VAR(k);
609
- THRUST_UNUSED_VAR(x0);
610
- THRUST_UNUSED_VAR(x1);
611
- THRUST_UNUSED_VAR(x2);
612
- THRUST_UNUSED_VAR(x3);
613
- THRUST_UNUSED_VAR(x4);
614
- THRUST_UNUSED_VAR(x5);
615
- THRUST_UNUSED_VAR(x6);
616
- THRUST_UNUSED_VAR(x7);
617
- THRUST_UNUSED_VAR(x8);
618
- #endif
619
- return status;
620
- }
621
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
622
- cudaError_t __device__
623
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const
624
- {
625
- cudaError_t status = cudaErrorNotSupported;
626
- #if __THRUST_HAS_CUDART__
627
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5,x6,x7,x8,x9);
628
- void *param_buffer = cudaGetParameterBuffer(64,size);
629
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5,x6,x7,x8,x9);
630
- status = launch_device(k, param_buffer);
631
- #else
632
- THRUST_UNUSED_VAR(k);
633
- THRUST_UNUSED_VAR(x0);
634
- THRUST_UNUSED_VAR(x1);
635
- THRUST_UNUSED_VAR(x2);
636
- THRUST_UNUSED_VAR(x3);
637
- THRUST_UNUSED_VAR(x4);
638
- THRUST_UNUSED_VAR(x5);
639
- THRUST_UNUSED_VAR(x6);
640
- THRUST_UNUSED_VAR(x7);
641
- THRUST_UNUSED_VAR(x8);
642
- THRUST_UNUSED_VAR(x9);
643
- #endif
644
- return status;
645
- }
646
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
647
- cudaError_t __device__
648
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const
649
- {
650
- cudaError_t status = cudaErrorNotSupported;
651
- #if __THRUST_HAS_CUDART__
652
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA);
653
- void *param_buffer = cudaGetParameterBuffer(64,size);
654
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA);
655
- status = launch_device(k, param_buffer);
656
- #else
657
- THRUST_UNUSED_VAR(k);
658
- THRUST_UNUSED_VAR(x0);
659
- THRUST_UNUSED_VAR(x1);
660
- THRUST_UNUSED_VAR(x2);
661
- THRUST_UNUSED_VAR(x3);
662
- THRUST_UNUSED_VAR(x4);
663
- THRUST_UNUSED_VAR(x5);
664
- THRUST_UNUSED_VAR(x6);
665
- THRUST_UNUSED_VAR(x7);
666
- THRUST_UNUSED_VAR(x8);
667
- THRUST_UNUSED_VAR(x9);
668
- THRUST_UNUSED_VAR(xA);
669
- #endif
670
- return status;
671
- }
672
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
673
- cudaError_t __device__
674
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const
675
- {
676
- cudaError_t status = cudaErrorNotSupported;
677
- #if __THRUST_HAS_CUDART__
678
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB);
679
- void *param_buffer = cudaGetParameterBuffer(64,size);
680
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB);
681
- status = launch_device(k, param_buffer);
682
- #else
683
- THRUST_UNUSED_VAR(k);
684
- THRUST_UNUSED_VAR(x0);
685
- THRUST_UNUSED_VAR(x1);
686
- THRUST_UNUSED_VAR(x2);
687
- THRUST_UNUSED_VAR(x3);
688
- THRUST_UNUSED_VAR(x4);
689
- THRUST_UNUSED_VAR(x5);
690
- THRUST_UNUSED_VAR(x6);
691
- THRUST_UNUSED_VAR(x7);
692
- THRUST_UNUSED_VAR(x8);
693
- THRUST_UNUSED_VAR(x9);
694
- THRUST_UNUSED_VAR(xA);
695
- THRUST_UNUSED_VAR(xB);
696
- #endif
697
- return status;
698
- }
699
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
700
- cudaError_t __device__
701
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const
702
- {
703
- cudaError_t status = cudaErrorNotSupported;
704
- #if __THRUST_HAS_CUDART__
705
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC);
706
- void *param_buffer = cudaGetParameterBuffer(64,size);
707
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC);
708
- status = launch_device(k, param_buffer);
709
- #else
710
- THRUST_UNUSED_VAR(k);
711
- THRUST_UNUSED_VAR(x0);
712
- THRUST_UNUSED_VAR(x1);
713
- THRUST_UNUSED_VAR(x2);
714
- THRUST_UNUSED_VAR(x3);
715
- THRUST_UNUSED_VAR(x4);
716
- THRUST_UNUSED_VAR(x5);
717
- THRUST_UNUSED_VAR(x6);
718
- THRUST_UNUSED_VAR(x7);
719
- THRUST_UNUSED_VAR(x8);
720
- THRUST_UNUSED_VAR(x9);
721
- THRUST_UNUSED_VAR(xA);
722
- THRUST_UNUSED_VAR(xB);
723
- THRUST_UNUSED_VAR(xC);
724
- #endif
725
- return status;
726
- }
727
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
728
- cudaError_t __device__
729
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC,_xD xD) const
730
- {
731
- cudaError_t status = cudaErrorNotSupported;
732
- #if __THRUST_HAS_CUDART__
733
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC,xD);
734
- void *param_buffer = cudaGetParameterBuffer(64,size);
735
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC,xD);
736
- status = launch_device(k, param_buffer);
737
- #else
738
- THRUST_UNUSED_VAR(k);
739
- THRUST_UNUSED_VAR(x0);
740
- THRUST_UNUSED_VAR(x1);
741
- THRUST_UNUSED_VAR(x2);
742
- THRUST_UNUSED_VAR(x3);
743
- THRUST_UNUSED_VAR(x4);
744
- THRUST_UNUSED_VAR(x5);
745
- THRUST_UNUSED_VAR(x6);
746
- THRUST_UNUSED_VAR(x7);
747
- THRUST_UNUSED_VAR(x8);
748
- THRUST_UNUSED_VAR(x9);
749
- THRUST_UNUSED_VAR(xA);
750
- THRUST_UNUSED_VAR(xB);
751
- THRUST_UNUSED_VAR(xC);
752
- THRUST_UNUSED_VAR(xD);
753
- #endif
754
- return status;
755
- }
756
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
757
- cudaError_t __device__
758
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC,_xD xD, _xE xE) const
759
- {
760
- cudaError_t status = cudaErrorNotSupported;
761
- #if __THRUST_HAS_CUDART__
762
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC,xD,xE);
763
- void *param_buffer = cudaGetParameterBuffer(64,size);
764
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC,xD,xE);
765
- status = launch_device(k, param_buffer);
766
- #else
767
- THRUST_UNUSED_VAR(k);
768
- THRUST_UNUSED_VAR(x0);
769
- THRUST_UNUSED_VAR(x1);
770
- THRUST_UNUSED_VAR(x2);
771
- THRUST_UNUSED_VAR(x3);
772
- THRUST_UNUSED_VAR(x4);
773
- THRUST_UNUSED_VAR(x5);
774
- THRUST_UNUSED_VAR(x6);
775
- THRUST_UNUSED_VAR(x7);
776
- THRUST_UNUSED_VAR(x8);
777
- THRUST_UNUSED_VAR(x9);
778
- THRUST_UNUSED_VAR(xA);
779
- THRUST_UNUSED_VAR(xB);
780
- THRUST_UNUSED_VAR(xC);
781
- THRUST_UNUSED_VAR(xD);
782
- THRUST_UNUSED_VAR(xE);
783
- #endif
784
- return status;
785
- }
786
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE, class _xF>
787
- cudaError_t __device__
788
- doit_device(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC,_xD xD, _xE xE, _xF xF) const
789
- {
790
- cudaError_t status = cudaErrorNotSupported;
791
- #if __THRUST_HAS_CUDART__
792
- const size_t size = argument_pack_size(0,x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC,xD,xE,xF);
793
- void *param_buffer = cudaGetParameterBuffer(64,size);
794
- fill_arguments((char*)param_buffer, 0, x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,xA,xB,xC,xD,xE,xF);
795
- status = launch_device(k, param_buffer);
796
- #else
797
- THRUST_UNUSED_VAR(k);
798
- THRUST_UNUSED_VAR(x0);
799
- THRUST_UNUSED_VAR(x1);
800
- THRUST_UNUSED_VAR(x2);
801
- THRUST_UNUSED_VAR(x3);
802
- THRUST_UNUSED_VAR(x4);
803
- THRUST_UNUSED_VAR(x5);
804
- THRUST_UNUSED_VAR(x6);
805
- THRUST_UNUSED_VAR(x7);
806
- THRUST_UNUSED_VAR(x8);
807
- THRUST_UNUSED_VAR(x9);
808
- THRUST_UNUSED_VAR(xA);
809
- THRUST_UNUSED_VAR(xB);
810
- THRUST_UNUSED_VAR(xC);
811
- THRUST_UNUSED_VAR(xD);
812
- THRUST_UNUSED_VAR(xE);
813
- THRUST_UNUSED_VAR(xF);
814
- #endif
815
- return status;
816
- }
817
- #endif /* variadic */
818
-
819
- template <class K>
820
- cudaError_t __device__
821
- launch_device(K k, void* buffer) const
822
- {
823
- #if __THRUST_HAS_CUDART__
824
- return cudaLaunchDevice((void*)k,
825
- buffer,
826
- dim3(grid),
827
- dim3(block),
828
- shared_mem,
829
- stream);
830
- #else
831
- THRUST_UNUSED_VAR(k);
832
- THRUST_UNUSED_VAR(buffer);
833
- return cudaErrorNotSupported;
834
- #endif
835
- }
836
-
837
-
838
- #if defined(__NVCOMPILER_CUDA__)
839
- # define THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(...) \
840
- (__builtin_is_device_code() ? \
841
- doit_device(__VA_ARGS__) : doit_host(__VA_ARGS__))
842
- #elif defined(__CUDA_ARCH__)
843
- # define THRUST_TRIPLE_LAUNCHER_HOSTDEVICE doit_device
844
- #else
845
- # define THRUST_TRIPLE_LAUNCHER_HOSTDEVICE doit_host
846
- #endif
847
-
848
- #if 0
849
- __thrust_exec_check_disable__
850
- template <class K, class... Args>
851
- cudaError_t THRUST_FUNCTION
852
- doit(K k, Args const&... args) const
853
- {
854
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, args...);
855
- }
856
- #else
857
- __thrust_exec_check_disable__
858
- template <class K, class _0>
859
- cudaError_t THRUST_FUNCTION
860
- doit(K k, _0 x0) const
861
- {
862
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0);
863
- }
864
- __thrust_exec_check_disable__
865
- template <class K, class _0, class _1>
866
- cudaError_t THRUST_FUNCTION
867
- doit(K k, _0 x0, _1 x1) const
868
- {
869
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1);
870
- }
871
- __thrust_exec_check_disable__
872
- template <class K, class _0, class _1, class _2>
873
- cudaError_t THRUST_FUNCTION
874
- doit(K k, _0 x0, _1 x1, _2 x2) const
875
- {
876
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2);
877
- }
878
- __thrust_exec_check_disable__
879
- template <class K, class _0, class _1, class _2, class _3>
880
- cudaError_t THRUST_FUNCTION
881
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3) const
882
- {
883
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3);
884
- }
885
- __thrust_exec_check_disable__
886
- template <class K, class _0, class _1, class _2, class _3, class _4>
887
- cudaError_t THRUST_FUNCTION
888
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const
889
- {
890
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4);
891
- }
892
- __thrust_exec_check_disable__
893
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5>
894
- cudaError_t THRUST_FUNCTION
895
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const
896
- {
897
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5);
898
- }
899
- __thrust_exec_check_disable__
900
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6>
901
- cudaError_t THRUST_FUNCTION
902
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const
903
- {
904
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5, x6);
905
- }
906
- __thrust_exec_check_disable__
907
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
908
- cudaError_t THRUST_FUNCTION
909
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const
910
- {
911
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5, x6, x7);
912
- }
913
- __thrust_exec_check_disable__
914
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
915
- cudaError_t THRUST_FUNCTION
916
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const
917
- {
918
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5, x6, x7, x8);
919
- }
920
- __thrust_exec_check_disable__
921
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
922
- cudaError_t THRUST_FUNCTION
923
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const
924
- {
925
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9);
926
- }
927
- __thrust_exec_check_disable__
928
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
929
- cudaError_t THRUST_FUNCTION
930
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const
931
- {
932
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA);
933
- }
934
- __thrust_exec_check_disable__
935
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
936
- cudaError_t THRUST_FUNCTION
937
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const
938
- {
939
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB);
940
- }
941
- __thrust_exec_check_disable__
942
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
943
- cudaError_t THRUST_FUNCTION
944
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const
945
- {
946
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC);
947
- }
948
- __thrust_exec_check_disable__
949
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
950
- cudaError_t THRUST_FUNCTION
951
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) const
952
- {
953
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD);
954
- }
955
- __thrust_exec_check_disable__
956
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
957
- cudaError_t THRUST_FUNCTION
958
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) const
959
- {
960
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE);
961
- }
962
- __thrust_exec_check_disable__
963
- template <class K, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE, class _xF>
964
- cudaError_t THRUST_FUNCTION
965
- doit(K k, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE, _xF xF) const
966
- {
967
- return THRUST_TRIPLE_LAUNCHER_HOSTDEVICE(k, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE, xF);
968
- }
969
- #endif
970
- #undef THRUST_TRIPLE_LAUNCHER_HOSTDEVICE
971
- }; // struct triple_chevron
972
-
973
- } // namespace launcher
974
- } // namespace cuda_cub
975
-
976
- } // end namespace thrust
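The bulk of `triple_chevron` is hand-rolled argument packing: `align_up` rounds each offset up to the argument's alignment before `copy_arg` writes its bytes. The same layout scheme in a Python sketch (hypothetical helper names; `struct` sizes stand in for C++ sizeof/alignof):

```python
import struct

def align_up(offset: int, alignment: int) -> int:
    # Round offset up to the next multiple of alignment,
    # mirroring triple_chevron::align_up<T>.
    return alignment * ((offset + alignment - 1) // alignment)

def pack_args(args):
    """Pack (struct_format, value) pairs into one aligned buffer,
    the way argument_pack_size / fill_arguments lay out kernel parameters."""
    buf = bytearray()
    for fmt, value in args:
        size = struct.calcsize(fmt)                # size doubles as alignment here
        offset = align_up(len(buf), size)
        buf.extend(b"\x00" * (offset - len(buf)))  # alignment padding
        buf.extend(struct.pack(fmt, value))
    return bytes(buf)

# A 4-byte int followed by an 8-byte double: the double lands at offset 8,
# so the packed buffer is 16 bytes, not 12.
print(len(pack_args([("i", 7), ("d", 3.14)])))  # 16
```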
spaces/ChallengeHub/Chinese-LangChain/assets/custom.css DELETED
@@ -1,190 +0,0 @@
1
- :root {
2
- --chatbot-color-light: rgba(255, 255, 255, 0.08);
3
- --chatbot-color-dark: #121111;
4
- }
5
-
6
- /* status_display */
7
- #status_display {
8
- display: flex;
9
- min-height: 2.5em;
10
- align-items: flex-end;
11
- justify-content: flex-end;
12
- }
13
- #status_display p {
14
- font-size: .85em;
15
- font-family: monospace;
16
- color: var(--body-text-color-subdued);
17
- }
18
-
19
-
20
-
21
- /* usage_display */
22
- #usage_display {
23
- height: 1em;
24
- }
25
- #usage_display p{
26
- padding: 0 1em;
27
- font-size: .85em;
28
- font-family: monospace;
29
- color: var(--body-text-color-subdued);
30
- }
31
- /* list */
32
- ol:not(.options), ul:not(.options) {
33
- padding-inline-start: 2em !important;
34
- }
35
-
36
- /* Thanks to @Keldos-Li for fixing it */
37
- /* Light mode (default) */
38
- #chuanhu_chatbot {
39
- background-color: var(--chatbot-color-light) !important;
40
- color: #000000 !important;
41
- }
42
- [data-testid = "bot"] {
43
- background-color: rgba(255, 255, 255, 0.08) !important;
44
- }
45
- [data-testid = "user"] {
46
- background-color: #95EC69 !important;
47
- }
48
-
49
- /* Dark mode */
50
- .dark #chuanhu_chatbot {
51
- background-color: var(--chatbot-color-dark) !important;
52
- color: rgba(255, 255, 255, 0.08) !important;
53
- }
54
- .dark [data-testid = "bot"] {
55
- background-color: #2C2C2C !important;
56
- }
57
- .dark [data-testid = "user"] {
58
- background-color: #26B561 !important;
59
- }
60
-
61
- #chuanhu_chatbot {
62
- height: 100%;
63
- min-height: 400px;
64
- }
65
-
66
- [class *= "message"] {
67
- border-radius: var(--radius-xl) !important;
68
- border: none;
69
- padding: var(--spacing-xl) !important;
70
- font-size: var(--text-md) !important;
71
- line-height: var(--line-md) !important;
72
- min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
73
- min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
74
- }
75
- [data-testid = "bot"] {
76
- max-width: 85%;
77
- border-bottom-left-radius: 0 !important;
78
- }
79
- [data-testid = "user"] {
80
- max-width: 85%;
81
- width: auto !important;
82
- border-bottom-right-radius: 0 !important;
83
- }
84
- /* Table */
85
- table {
86
- margin: 1em 0;
87
- border-collapse: collapse;
88
- empty-cells: show;
89
- }
90
- td,th {
91
- border: 1.2px solid var(--border-color-primary) !important;
92
- padding: 0.2em;
93
- }
94
- thead {
95
- background-color: rgba(175,184,193,0.2);
96
- }
97
- thead th {
98
- padding: .5em .2em;
99
- }
100
- /* Inline code */
101
- code {
102
- display: inline;
103
- white-space: break-spaces;
104
- border-radius: 6px;
105
- margin: 0 2px 0 2px;
106
- padding: .2em .4em .1em .4em;
107
- background-color: rgba(175,184,193,0.2);
108
- }
109
- /* Code block */
110
- pre code {
111
- display: block;
112
- overflow: auto;
113
- white-space: pre;
114
- background-color: hsla(0, 0%, 0%, 80%)!important;
115
- border-radius: 10px;
116
- padding: 1.4em 1.2em 0em 1.4em;
117
- margin: 1.2em 2em 1.2em 0.5em;
118
- color: #FFF;
119
- box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
120
- }
121
- /* Highlight */
122
- .highlight .hll { background-color: #49483e }
123
- .highlight .c { color: #75715e } /* Comment */
124
- .highlight .err { color: #960050; background-color: #1e0010 } /* Error */
125
- .highlight .k { color: #66d9ef } /* Keyword */
126
- .highlight .l { color: #ae81ff } /* Literal */
127
- .highlight .n { color: #f8f8f2 } /* Name */
128
- .highlight .o { color: #f92672 } /* Operator */
129
- .highlight .p { color: #f8f8f2 } /* Punctuation */
130
- .highlight .ch { color: #75715e } /* Comment.Hashbang */
131
- .highlight .cm { color: #75715e } /* Comment.Multiline */
132
- .highlight .cp { color: #75715e } /* Comment.Preproc */
133
- .highlight .cpf { color: #75715e } /* Comment.PreprocFile */
134
- .highlight .c1 { color: #75715e } /* Comment.Single */
135
- .highlight .cs { color: #75715e } /* Comment.Special */
136
- .highlight .gd { color: #f92672 } /* Generic.Deleted */
137
- .highlight .ge { font-style: italic } /* Generic.Emph */
138
- .highlight .gi { color: #a6e22e } /* Generic.Inserted */
139
- .highlight .gs { font-weight: bold } /* Generic.Strong */
140
- .highlight .gu { color: #75715e } /* Generic.Subheading */
141
- .highlight .kc { color: #66d9ef } /* Keyword.Constant */
142
- .highlight .kd { color: #66d9ef } /* Keyword.Declaration */
143
- .highlight .kn { color: #f92672 } /* Keyword.Namespace */
144
- .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
145
- .highlight .kr { color: #66d9ef } /* Keyword.Reserved */
146
- .highlight .kt { color: #66d9ef } /* Keyword.Type */
147
- .highlight .ld { color: #e6db74 } /* Literal.Date */
148
- .highlight .m { color: #ae81ff } /* Literal.Number */
149
- .highlight .s { color: #e6db74 } /* Literal.String */
150
- .highlight .na { color: #a6e22e } /* Name.Attribute */
151
- .highlight .nb { color: #f8f8f2 } /* Name.Builtin */
152
- .highlight .nc { color: #a6e22e } /* Name.Class */
153
- .highlight .no { color: #66d9ef } /* Name.Constant */
154
- .highlight .nd { color: #a6e22e } /* Name.Decorator */
155
- .highlight .ni { color: #f8f8f2 } /* Name.Entity */
156
- .highlight .ne { color: #a6e22e } /* Name.Exception */
157
- .highlight .nf { color: #a6e22e } /* Name.Function */
158
- .highlight .nl { color: #f8f8f2 } /* Name.Label */
159
- .highlight .nn { color: #f8f8f2 } /* Name.Namespace */
160
- .highlight .nx { color: #a6e22e } /* Name.Other */
161
- .highlight .py { color: #f8f8f2 } /* Name.Property */
162
- .highlight .nt { color: #f92672 } /* Name.Tag */
163
- .highlight .nv { color: #f8f8f2 } /* Name.Variable */
164
- .highlight .ow { color: #f92672 } /* Operator.Word */
165
- .highlight .w { color: #f8f8f2 } /* Text.Whitespace */
166
- .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
167
- .highlight .mf { color: #ae81ff } /* Literal.Number.Float */
168
- .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
169
- .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
170
- .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
171
- .highlight .sa { color: #e6db74 } /* Literal.String.Affix */
172
- .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
173
- .highlight .sc { color: #e6db74 } /* Literal.String.Char */
174
- .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
175
- .highlight .sd { color: #e6db74 } /* Literal.String.Doc */
176
- .highlight .s2 { color: #e6db74 } /* Literal.String.Double */
177
- .highlight .se { color: #ae81ff } /* Literal.String.Escape */
178
- .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
179
- .highlight .si { color: #e6db74 } /* Literal.String.Interpol */
180
- .highlight .sx { color: #e6db74 } /* Literal.String.Other */
181
- .highlight .sr { color: #e6db74 } /* Literal.String.Regex */
182
- .highlight .s1 { color: #e6db74 } /* Literal.String.Single */
183
- .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
184
- .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
185
- .highlight .fm { color: #a6e22e } /* Name.Function.Magic */
186
- .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
187
- .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
188
- .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
189
- .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
190
- .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
spaces/ChandraMohanNayal/AutoGPT/tests/integration/memory_tests.py DELETED
@@ -1,49 +0,0 @@
- import random
- import string
- import sys
- import unittest
- from pathlib import Path
-
- from autogpt.config import Config
- from autogpt.memory.local import LocalCache
-
-
- class TestLocalCache(unittest.TestCase):
-     def random_string(self, length):
-         return "".join(random.choice(string.ascii_letters) for _ in range(length))
-
-     def setUp(self):
-         cfg = Config()
-         self.cache = LocalCache(cfg)
-         self.cache.clear()
-
-         # Add example texts to the cache
-         self.example_texts = [
-             "The quick brown fox jumps over the lazy dog",
-             "I love machine learning and natural language processing",
-             "The cake is a lie, but the pie is always true",
-             "ChatGPT is an advanced AI model for conversation",
-         ]
-
-         for text in self.example_texts:
-             self.cache.add(text)
-
-         # Add some random strings to test noise
-         for _ in range(5):
-             self.cache.add(self.random_string(10))
-
-     def test_get_relevant(self):
-         query = "I'm interested in artificial intelligence and NLP"
-         k = 3
-         relevant_texts = self.cache.get_relevant(query, k)
-
-         print(f"Top {k} relevant texts for the query '{query}':")
-         for i, text in enumerate(relevant_texts, start=1):
-             print(f"{i}. {text}")
-
-         self.assertEqual(len(relevant_texts), k)
-         self.assertIn(self.example_texts[1], relevant_texts)
-
-
- if __name__ == "__main__":
-     unittest.main()
spaces/Cletrason/Cletrason-toad-in-the-mario-movie/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("models/Cletrason/toad-in-the-mario-movie").launch()
spaces/CodeDoes/FrostAura-gpt-neox-20b-fiction-novel-generation/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("models/FrostAura/gpt-neox-20b-fiction-novel-generation").launch()
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/diffusionmodules/upscaling.py DELETED
@@ -1,81 +0,0 @@
- import torch
- import torch.nn as nn
- import numpy as np
- from functools import partial
-
- from ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule
- from ldm.util import default
-
-
- class AbstractLowScaleModel(nn.Module):
-     # for concatenating a downsampled image to the latent representation
-     def __init__(self, noise_schedule_config=None):
-         super(AbstractLowScaleModel, self).__init__()
-         if noise_schedule_config is not None:
-             self.register_schedule(**noise_schedule_config)
-
-     def register_schedule(self, beta_schedule="linear", timesteps=1000,
-                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-         betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
-                                    cosine_s=cosine_s)
-         alphas = 1. - betas
-         alphas_cumprod = np.cumprod(alphas, axis=0)
-         alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
-
-         timesteps, = betas.shape
-         self.num_timesteps = int(timesteps)
-         self.linear_start = linear_start
-         self.linear_end = linear_end
-         assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
-
-         to_torch = partial(torch.tensor, dtype=torch.float32)
-
-         self.register_buffer('betas', to_torch(betas))
-         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-         self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
-
-         # calculations for diffusion q(x_t | x_{t-1}) and others
-         self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
-         self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
-         self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
-         self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
-         self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
-
-     def q_sample(self, x_start, t, noise=None):
-         noise = default(noise, lambda: torch.randn_like(x_start))
-         return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
-                 extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
-
-     def forward(self, x):
-         return x, None
-
-     def decode(self, x):
-         return x
-
-
- class SimpleImageConcat(AbstractLowScaleModel):
-     # no noise level conditioning
-     def __init__(self):
-         super(SimpleImageConcat, self).__init__(noise_schedule_config=None)
-         self.max_noise_level = 0
-
-     def forward(self, x):
-         # fix to constant noise level
-         return x, torch.zeros(x.shape[0], device=x.device).long()
-
-
- class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel):
-     def __init__(self, noise_schedule_config, max_noise_level=1000, to_cuda=False):
-         super().__init__(noise_schedule_config=noise_schedule_config)
-         self.max_noise_level = max_noise_level
-
-     def forward(self, x, noise_level=None):
-         if noise_level is None:
-             noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
-         else:
-             assert isinstance(noise_level, torch.Tensor)
-         z = self.q_sample(x, noise_level)
-         return z, noise_level
spaces/CyberPeace-Institute/Cybersecurity-Knowledge-Graph-Extraction/app.py DELETED
@@ -1,103 +0,0 @@
- import streamlit as st
- from transformers import AutoModelForTokenClassification
- from annotated_text import annotated_text
- import numpy as np
- import os, joblib
-
- from utils import get_idxs_from_text
-
- model = AutoModelForTokenClassification.from_pretrained("CyberPeace-Institute/Cybersecurity-Knowledge-Graph", trust_remote_code=True)
-
- role_classifiers = {}
- folder_path = '/arg_role_models'
- for filename in os.listdir(os.getcwd() + folder_path):
-     if filename.endswith('.joblib'):
-         file_path = os.getcwd() + os.path.join(folder_path, filename)
-         clf = joblib.load(file_path)
-         arg = filename.split(".")[0]
-         role_classifiers[arg] = clf
-
- def annotate(name):
-     tokens = [item["token"] for item in output]
-     tokens = [token.replace(" ", "") for token in tokens]
-     text = model.tokenizer.decode([item["id"] for item in output])
-     idxs = get_idxs_from_text(text, tokens)
-     labels = [item[name] for item in output]
-
-     annotated_text_list = []
-     last_label = ""
-     cumulative_tokens = ""
-     last_id = 0
-     for idx, label in zip(idxs, labels):
-         to_label = label
-         label_short = to_label.split("-")[1] if "-" in to_label else to_label
-         if last_label == label_short:
-             cumulative_tokens += text[last_id : idx["end_idx"]]
-             last_id = idx["end_idx"]
-         else:
-             if last_label != "":
-                 if last_label == "O":
-                     annotated_text_list.append(cumulative_tokens)
-                 else:
-                     annotated_text_list.append((cumulative_tokens, last_label))
-             last_label = label_short
-             cumulative_tokens = idx["word"]
-             last_id = idx["end_idx"]
-     if last_label == "O":
-         annotated_text_list.append(cumulative_tokens)
-     else:
-         annotated_text_list.append((cumulative_tokens, last_label))
-     annotated_text(annotated_text_list)
-
- def get_arg_roles(output):
-     args = [(idx, item["argument"], item["token"]) for idx, item in enumerate(output) if item["argument"] != "O"]
-
-     entities = []
-     current_entity = None
-     for position, label, token in args:
-         if label.startswith('B-'):
-             if current_entity is not None:
-                 entities.append(current_entity)
-             current_entity = {'label': label[2:], 'text': token.replace(" ", ""), 'start': position, 'end': position}
-         elif label.startswith('I-'):
-             if current_entity is not None:
-                 current_entity['text'] += ' ' + token.replace(" ", "")
-                 current_entity['end'] = position
-     for entity in entities:
-         context = model.tokenizer.decode([item["id"] for item in output[max(0, entity["start"] - 15) : min(len(output), entity["end"] + 15)]])
-         entity["context"] = context
-
-     for entity in entities:
-         if len(model.arg_2_role[entity["label"]]) > 1:
-             sent_embed = model.embed_model.encode(entity["context"])
-             arg_embed = model.embed_model.encode(entity["text"])
-             embed = np.concatenate((sent_embed, arg_embed))
-             arg_clf = role_classifiers[entity["label"]]
-             role_id = arg_clf.predict(embed.reshape(1, -1))
-             role = model.arg_2_role[entity["label"]][role_id[0]]
-             entity["role"] = role
-         else:
-             entity["role"] = model.arg_2_role[entity["label"]][0]
-
-     for item in output:
-         item["role"] = "O"
-     for entity in entities:
-         for i in range(entity["start"], entity["end"] + 1):
-             output[i]["role"] = entity["role"]
-     return output
-
- st.title("Create Knowledge Graphs from Cyber Incidents")
-
- text_input = st.text_area("Enter your text here", height=100)
-
- if text_input or st.button('Apply'):
-     output = model(text_input)
-     st.subheader("Event Nuggets")
-     annotate("nugget")
-     st.subheader("Event Arguments")
-     annotate("argument")
-     st.subheader("Realis of Event Nuggets")
-     annotate("realis")
-     output = get_arg_roles(output)
-     st.subheader("Role of the Event Arguments")
-     annotate("role")
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/B_A_S_E_.py DELETED
@@ -1,5 +0,0 @@
- from .otBase import BaseTTXConverter
-
-
- class table_B_A_S_E_(BaseTTXConverter):
-     pass
spaces/DQChoi/image_sticker/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Image Sticker
- emoji: 💻
- colorFrom: pink
- colorTo: pink
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Dagfinn1962/prodia2/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Prodia
- emoji: 🔥
- colorFrom: pink
- colorTo: blue
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- duplicated_from: pikto/prodia
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Datasculptor/StyleGAN-NADA/model/sg2_model.py DELETED
@@ -1,817 +0,0 @@
- import math
- import random
- import functools
- import operator
-
- import torch
- from torch import nn
- from torch.nn import functional as F
- from torch.autograd import Function
-
- from op import conv2d_gradfix
-
- if torch.cuda.is_available():
-     from op.fused_act import FusedLeakyReLU, fused_leaky_relu
-     from op.upfirdn2d import upfirdn2d
- else:
-     from op.fused_act_cpu import FusedLeakyReLU, fused_leaky_relu
-     from op.upfirdn2d_cpu import upfirdn2d
-
-
- class PixelNorm(nn.Module):
-     def __init__(self):
-         super().__init__()
-
-     def forward(self, input):
-         return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
-
-
- def make_kernel(k):
-     k = torch.tensor(k, dtype=torch.float32)
-
-     if k.ndim == 1:
-         k = k[None, :] * k[:, None]
-
-     k /= k.sum()
-
-     return k
-
-
- class Upsample(nn.Module):
-     def __init__(self, kernel, factor=2):
-         super().__init__()
-
-         self.factor = factor
-         kernel = make_kernel(kernel) * (factor ** 2)
-         self.register_buffer("kernel", kernel)
-
-         p = kernel.shape[0] - factor
-
-         pad0 = (p + 1) // 2 + factor - 1
-         pad1 = p // 2
-
-         self.pad = (pad0, pad1)
-
-     def forward(self, input):
-         out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
-
-         return out
-
-
- class Downsample(nn.Module):
-     def __init__(self, kernel, factor=2):
-         super().__init__()
-
-         self.factor = factor
-         kernel = make_kernel(kernel)
-         self.register_buffer("kernel", kernel)
-
-         p = kernel.shape[0] - factor
-
-         pad0 = (p + 1) // 2
-         pad1 = p // 2
-
-         self.pad = (pad0, pad1)
-
-     def forward(self, input):
-         out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
-
-         return out
-
-
- class Blur(nn.Module):
-     def __init__(self, kernel, pad, upsample_factor=1):
-         super().__init__()
-
-         kernel = make_kernel(kernel)
-
-         if upsample_factor > 1:
-             kernel = kernel * (upsample_factor ** 2)
-
-         self.register_buffer("kernel", kernel)
-
-         self.pad = pad
-
-     def forward(self, input):
-         out = upfirdn2d(input, self.kernel, pad=self.pad)
-
-         return out
-
-
- class EqualConv2d(nn.Module):
-     def __init__(
-         self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
-     ):
-         super().__init__()
-
-         self.weight = nn.Parameter(
-             torch.randn(out_channel, in_channel, kernel_size, kernel_size)
-         )
-         self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
-
-         self.stride = stride
-         self.padding = padding
-
-         if bias:
-             self.bias = nn.Parameter(torch.zeros(out_channel))
-
-         else:
-             self.bias = None
-
-     def forward(self, input):
-         out = conv2d_gradfix.conv2d(
-             input,
-             self.weight * self.scale,
-             bias=self.bias,
-             stride=self.stride,
-             padding=self.padding,
-         )
-
-         return out
-
-     def __repr__(self):
-         return (
-             f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},"
-             f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})"
-         )
-
-
- class EqualLinear(nn.Module):
-     def __init__(
-         self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
-     ):
-         super().__init__()
-
-         self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
-
-         if bias:
-             self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
-
-         else:
-             self.bias = None
-
-         self.activation = activation
-
-         self.scale = (1 / math.sqrt(in_dim)) * lr_mul
-         self.lr_mul = lr_mul
-
-     def forward(self, input):
-         if self.activation:
-             out = F.linear(input, self.weight * self.scale)
-             out = fused_leaky_relu(out, self.bias * self.lr_mul)
-
-         else:
-             out = F.linear(
-                 input, self.weight * self.scale, bias=self.bias * self.lr_mul
-             )
-
-         return out
-
-     def __repr__(self):
-         return (
-             f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})"
-         )
-
-
- class ModulatedConv2d(nn.Module):
-     def __init__(
-         self,
-         in_channel,
-         out_channel,
-         kernel_size,
-         style_dim,
-         demodulate=True,
-         upsample=False,
-         downsample=False,
-         blur_kernel=[1, 3, 3, 1],
-         fused=True,
-     ):
-         super().__init__()
-
-         self.eps = 1e-8
-         self.kernel_size = kernel_size
-         self.in_channel = in_channel
-         self.out_channel = out_channel
-         self.upsample = upsample
-         self.downsample = downsample
-
-         if upsample:
-             factor = 2
-             p = (len(blur_kernel) - factor) - (kernel_size - 1)
-             pad0 = (p + 1) // 2 + factor - 1
-             pad1 = p // 2 + 1
-
-             self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
-
-         if downsample:
-             factor = 2
-             p = (len(blur_kernel) - factor) + (kernel_size - 1)
-             pad0 = (p + 1) // 2
-             pad1 = p // 2
-
-             self.blur = Blur(blur_kernel, pad=(pad0, pad1))
-
-         fan_in = in_channel * kernel_size ** 2
-         self.scale = 1 / math.sqrt(fan_in)
-         self.padding = kernel_size // 2
-
-         self.weight = nn.Parameter(
-             torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
-         )
-
-         self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
-
-         self.demodulate = demodulate
-         self.fused = fused
-
-     def __repr__(self):
-         return (
-             f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, "
-             f"upsample={self.upsample}, downsample={self.downsample})"
-         )
-
-     def forward(self, input, style, is_s_code=False):
-         batch, in_channel, height, width = input.shape
-
-         if not self.fused:
-             weight = self.scale * self.weight.squeeze(0)
-
-             if is_s_code:
-                 style = style[self.modulation]
-             else:
-                 style = self.modulation(style)
-
-             if self.demodulate:
-                 w = weight.unsqueeze(0) * style.view(batch, 1, in_channel, 1, 1)
-                 dcoefs = (w.square().sum((2, 3, 4)) + 1e-8).rsqrt()
-
-             input = input * style.reshape(batch, in_channel, 1, 1)
-
-             if self.upsample:
-                 weight = weight.transpose(0, 1)
-                 out = conv2d_gradfix.conv_transpose2d(
-                     input, weight, padding=0, stride=2
-                 )
-                 out = self.blur(out)
-
-             elif self.downsample:
-                 input = self.blur(input)
-                 out = conv2d_gradfix.conv2d(input, weight, padding=0, stride=2)
-
-             else:
-                 out = conv2d_gradfix.conv2d(input, weight, padding=self.padding)
-
-             if self.demodulate:
-                 out = out * dcoefs.view(batch, -1, 1, 1)
-
-             return out
-
-         if is_s_code:
-             style = style[self.modulation]
-         else:
-             style = self.modulation(style)
-
-         style = style.view(batch, 1, in_channel, 1, 1)
-         weight = self.scale * self.weight * style
-
-         if self.demodulate:
-             demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
-             weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
-
-         weight = weight.view(
-             batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
-         )
-
-         if self.upsample:
-             input = input.view(1, batch * in_channel, height, width)
-             weight = weight.view(
-                 batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
-             )
-             weight = weight.transpose(1, 2).reshape(
-                 batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
-             )
-             out = conv2d_gradfix.conv_transpose2d(
-                 input, weight, padding=0, stride=2, groups=batch
-             )
-             _, _, height, width = out.shape
-             out = out.view(batch, self.out_channel, height, width)
-             out = self.blur(out)
-
-         elif self.downsample:
-             input = self.blur(input)
-             _, _, height, width = input.shape
-             input = input.view(1, batch * in_channel, height, width)
-             out = conv2d_gradfix.conv2d(
-                 input, weight, padding=0, stride=2, groups=batch
-             )
-             _, _, height, width = out.shape
-             out = out.view(batch, self.out_channel, height, width)
-
-         else:
-             input = input.view(1, batch * in_channel, height, width)
-             out = conv2d_gradfix.conv2d(
-                 input, weight, padding=self.padding, groups=batch
-             )
-             _, _, height, width = out.shape
-             out = out.view(batch, self.out_channel, height, width)
-
-         return out
-
-
- class NoiseInjection(nn.Module):
-     def __init__(self):
-         super().__init__()
-
-         self.weight = nn.Parameter(torch.zeros(1))
-
-     def forward(self, image, noise=None):
-         if noise is None:
-             batch, _, height, width = image.shape
-             noise = image.new_empty(batch, 1, height, width).normal_()
-
-         return image + self.weight * noise
-
-
- class ConstantInput(nn.Module):
-     def __init__(self, channel, size=4):
-         super().__init__()
-
-         self.input = nn.Parameter(torch.randn(1, channel, size, size))
-
-     def forward(self, input, is_s_code=False):
-         if not is_s_code:
-             batch = input.shape[0]
-         else:
-             batch = next(iter(input.values())).shape[0]
-
-         out = self.input.repeat(batch, 1, 1, 1)
-
-         return out
-
-
- class StyledConv(nn.Module):
-     def __init__(
-         self,
-         in_channel,
-         out_channel,
-         kernel_size,
-         style_dim,
-         upsample=False,
-         blur_kernel=[1, 3, 3, 1],
-         demodulate=True,
-     ):
-         super().__init__()
-
-         self.conv = ModulatedConv2d(
-             in_channel,
-             out_channel,
-             kernel_size,
-             style_dim,
-             upsample=upsample,
-             blur_kernel=blur_kernel,
-             demodulate=demodulate,
-         )
-
-         self.noise = NoiseInjection()
-         # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
-         # self.activate = ScaledLeakyReLU(0.2)
-         self.activate = FusedLeakyReLU(out_channel)
-
-     def forward(self, input, style, noise=None, is_s_code=False):
-         out = self.conv(input, style, is_s_code=is_s_code)
-         out = self.noise(out, noise=noise)
-         # out = out + self.bias
-         out = self.activate(out)
-
-         return out
-
-
- class ToRGB(nn.Module):
-     def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
-         super().__init__()
-
-         if upsample:
-             self.upsample = Upsample(blur_kernel)
-
-         self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
-         self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
-
-     def forward(self, input, style, skip=None, is_s_code=False):
-         out = self.conv(input, style, is_s_code=is_s_code)
-         out = out + self.bias
-
-         if skip is not None:
-             skip = self.upsample(skip)
-
-             out = out + skip
-
-         return out
-
-
- class Generator(nn.Module):
-     def __init__(
-         self,
-         size,
-         style_dim,
-         n_mlp,
-         channel_multiplier=2,
-         blur_kernel=[1, 3, 3, 1],
-         lr_mlp=0.01,
-     ):
-         super().__init__()
-
-         self.size = size
-
-         self.style_dim = style_dim
-
-         layers = [PixelNorm()]
-
-         for i in range(n_mlp):
-             layers.append(
-                 EqualLinear(
-                     style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu"
-                 )
-             )
-
-         self.style = nn.Sequential(*layers)
-
-         self.channels = {
-             4: 512,
-             8: 512,
-             16: 512,
-             32: 512,
-             64: 256 * channel_multiplier,
-             128: 128 * channel_multiplier,
-             256: 64 * channel_multiplier,
-             512: 32 * channel_multiplier,
-             1024: 16 * channel_multiplier,
-         }
-
-         self.input = ConstantInput(self.channels[4])
-         self.conv1 = StyledConv(
-             self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
-         )
-         self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
-
-         self.log_size = int(math.log(size, 2))
-         self.num_layers = (self.log_size - 2) * 2 + 1
-
-         self.convs = nn.ModuleList()
-         self.upsamples = nn.ModuleList()
-         self.to_rgbs = nn.ModuleList()
-         self.noises = nn.Module()
-
-         in_channel = self.channels[4]
-
-         for layer_idx in range(self.num_layers):
-             res = (layer_idx + 5) // 2
-             shape = [1, 1, 2 ** res, 2 ** res]
-             self.noises.register_buffer(f"noise_{layer_idx}", torch.randn(*shape))
-
-         for i in range(3, self.log_size + 1):
-             out_channel = self.channels[2 ** i]
-
-             self.convs.append(
-                 StyledConv(
-                     in_channel,
-                     out_channel,
-                     3,
-                     style_dim,
-                     upsample=True,
-                     blur_kernel=blur_kernel,
-                 )
-             )
-
-             self.convs.append(
-                 StyledConv(
-                     out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
-                 )
-             )
-
-             self.to_rgbs.append(ToRGB(out_channel, style_dim))
-
-             in_channel = out_channel
-
-         self.n_latent = self.log_size * 2 - 2
-
-         self.modulation_layers = [self.conv1.conv.modulation, self.to_rgb1.conv.modulation] + \
-                                  [layer.conv.modulation for layer in self.convs] + \
-                                  [layer.conv.modulation for layer in self.to_rgbs]
-
-     def make_noise(self):
-         device = self.input.input.device
-
-         noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
-
-         for i in range(3, self.log_size + 1):
-             for _ in range(2):
-                 noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
-
-         return noises
-
-     def mean_latent(self, n_latent):
-         latent_in = torch.randn(
-             n_latent, self.style_dim, device=self.input.input.device
-         )
-         latent = self.style(latent_in).mean(0, keepdim=True)
-
-         return latent
-
-     def get_latent(self, input):
-         return self.style(input)
-
-     def get_s_code(self, styles, input_is_latent):
-
-         if not input_is_latent:
-             styles = [self.style(s) for s in styles]
-
-         s_codes = [{  # const block
-             self.modulation_layers[0]: self.modulation_layers[0](style[:, 0]),    # s0
-             self.modulation_layers[1]: self.modulation_layers[1](style[:, 1]),    # s1
-             # conv layers
-             self.modulation_layers[2]: self.modulation_layers[2](style[:, 1]),    # s2
-             self.modulation_layers[3]: self.modulation_layers[3](style[:, 2]),    # s3
-             self.modulation_layers[4]: self.modulation_layers[4](style[:, 3]),    # s5
-             self.modulation_layers[5]: self.modulation_layers[5](style[:, 4]),    # s6
-             self.modulation_layers[6]: self.modulation_layers[6](style[:, 5]),    # s8
-             self.modulation_layers[7]: self.modulation_layers[7](style[:, 6]),    # s9
-             self.modulation_layers[8]: self.modulation_layers[8](style[:, 7]),    # s11
-             self.modulation_layers[9]: self.modulation_layers[9](style[:, 8]),    # s12
-             self.modulation_layers[10]: self.modulation_layers[10](style[:, 9]),  # s14
-             self.modulation_layers[11]: self.modulation_layers[11](style[:, 10]), # s15
-             self.modulation_layers[12]: self.modulation_layers[12](style[:, 11]), # s17
-             self.modulation_layers[13]: self.modulation_layers[13](style[:, 12]), # s18
-             self.modulation_layers[14]: self.modulation_layers[14](style[:, 13]), # s20
-             self.modulation_layers[15]: self.modulation_layers[15](style[:, 14]), # s21
-             self.modulation_layers[16]: self.modulation_layers[16](style[:, 15]), # s23
-             self.modulation_layers[17]: self.modulation_layers[17](style[:, 16]), # s24
-             # toRGB layers
-             self.modulation_layers[18]: self.modulation_layers[18](style[:, 3]),  # s4
-             self.modulation_layers[19]: self.modulation_layers[19](style[:, 5]),  # s7
-             self.modulation_layers[20]: self.modulation_layers[20](style[:, 7]),  # s10
-             self.modulation_layers[21]: self.modulation_layers[21](style[:, 9]),  # s13
-             self.modulation_layers[22]: self.modulation_layers[22](style[:, 11]), # s16
-             self.modulation_layers[23]: self.modulation_layers[23](style[:, 13]), # s19
-             self.modulation_layers[24]: self.modulation_layers[24](style[:, 15]), # s22
-             self.modulation_layers[25]: self.modulation_layers[25](style[:, 17]), # s25
-         } for style in styles]
-
-         return s_codes
-
-     def forward(
-         self,
-         styles,
-         return_latents=False,
-         inject_index=None,
-         truncation=1,
-         truncation_latent=None,
-         input_is_latent=False,
-         input_is_s_code=False,
-         noise=None,
-         randomize_noise=True,
-     ):
-         if not input_is_s_code:
-             return self.forward_with_w(styles, return_latents, inject_index, truncation, truncation_latent, input_is_latent, noise, randomize_noise)
-
-         return self.forward_with_s(styles, return_latents, noise, randomize_noise)
-
-     def forward_with_w(
-         self,
-         styles,
-         return_latents=False,
-         inject_index=None,
-         truncation=1,
-         truncation_latent=None,
-         input_is_latent=False,
-         noise=None,
-         randomize_noise=True,
-     ):
-         if not input_is_latent:
-             styles = [self.style(s) for s in styles]
-
-         if noise is None:
-             if randomize_noise:
-                 noise = [None] * self.num_layers
-             else:
-                 noise = [
-                     getattr(self.noises, f"noise_{i}") for i in range(self.num_layers)
-                 ]
-
-         if truncation < 1:
-             style_t = []
-
-             for style in styles:
-                 style_t.append(
-                     truncation_latent + truncation * (style - truncation_latent)
-                 )
-
-             styles = style_t
-
-         if len(styles) < 2:
-             inject_index = self.n_latent
-
-             if styles[0].ndim < 3:
-                 latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
-
-             else:
-                 latent = styles[0]
-
-         else:
-             if inject_index is None:
-                 inject_index = random.randint(1, self.n_latent - 1)
-
-             latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
-             latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
-
-             latent = torch.cat([latent, latent2], 1)
-
-         out = self.input(latent)
-         out = self.conv1(out, latent[:, 0], noise=noise[0])
-
-         skip = self.to_rgb1(out, latent[:, 1])
-
-         i = 1
-         for conv1, conv2, noise1, noise2, to_rgb in zip(
-             self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
-         ):
-             out = conv1(out, latent[:, i], noise=noise1)
-             out = conv2(out, latent[:, i + 1], noise=noise2)
-             skip = to_rgb(out, latent[:, i + 2], skip)
-
-             i += 2
-
-         image = skip
-
-         if return_latents:
-             return image, latent
-
-         else:
-             return image, None
-
-     def forward_with_s(
-         self,
-         styles,
-         return_latents=False,
-         noise=None,
-         randomize_noise=True,
-     ):
-
-         if noise is None:
-             if randomize_noise:
-                 noise = [None] * self.num_layers
-             else:
-                 noise = [
-                     getattr(self.noises, f"noise_{i}") for i in range(self.num_layers)
-                 ]
-
-         out = self.input(styles, is_s_code=True)
-         out = self.conv1(out, styles, is_s_code=True, noise=noise[0])
-
-         skip = self.to_rgb1(out, styles, is_s_code=True)
-
-         i = 1
-         for conv1, conv2, noise1, noise2, to_rgb in zip(
-             self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
-         ):
-             out = conv1(out, styles, is_s_code=True, noise=noise1)
-             out = conv2(out, styles, is_s_code=True, noise=noise2)
-             skip = to_rgb(out, styles, skip, is_s_code=True)
-
-             i += 2
-
-         image = skip
-
-         if return_latents:
-             return image, styles
-
-         else:
-             return image, None
-
- class ConvLayer(nn.Sequential):
-     def __init__(
-         self,
-         in_channel,
-         out_channel,
-         kernel_size,
-         downsample=False,
-         blur_kernel=[1, 3, 3, 1],
-         bias=True,
-         activate=True,
-     ):
-         layers = []
-
-         if downsample:
-             factor = 2
-             p = (len(blur_kernel) - factor) + (kernel_size - 1)
-             pad0 = (p + 1) // 2
-             pad1 = p // 2
-
-             layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
-
-             stride = 2
-             self.padding = 0
-
-         else:
-             stride = 1
-             self.padding = kernel_size // 2
-
-         layers.append(
-             EqualConv2d(
-                 in_channel,
-                 out_channel,
-                 kernel_size,
-                 padding=self.padding,
-                 stride=stride,
-                 bias=bias and not activate,
-             )
-         )
-
-         if activate:
-             layers.append(FusedLeakyReLU(out_channel, bias=bias))
-
-         super().__init__(*layers)
-
-
- class ResBlock(nn.Module):
-     def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
-         super().__init__()
-
-         self.conv1 = ConvLayer(in_channel, in_channel, 3)
-         self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
-
-         self.skip = ConvLayer(
-             in_channel, out_channel, 1, downsample=True, activate=False, bias=False
-         )
-
-     def forward(self, input):
-         out = self.conv1(input)
-         out = self.conv2(out)
-
-         skip = self.skip(input)
-         out = (out + skip) / math.sqrt(2)
-
-         return out
-
-
- class Discriminator(nn.Module):
-     def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
-         super().__init__()
-
-         channels = {
-             4: 512,
-             8: 512,
-             16: 512,
-             32: 512,
-             64: 256 * channel_multiplier,
-             128: 128 * channel_multiplier,
-             256: 64 * channel_multiplier,
-             512: 32 * channel_multiplier,
-             1024: 16 * channel_multiplier,
-         }
-
-         convs = [ConvLayer(3, channels[size], 1)]
-
-         log_size = int(math.log(size, 2))
-
-         in_channel = channels[size]
-
-         for i in range(log_size, 2, -1):
-             out_channel = channels[2 ** (i - 1)]
-
-             convs.append(ResBlock(in_channel, out_channel, blur_kernel))
-
-             in_channel = out_channel
-
-         self.convs = nn.Sequential(*convs)
-
-         self.stddev_group = 4
-         self.stddev_feat = 1
-
-         self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
-         self.final_linear = nn.Sequential(
-             EqualLinear(channels[4] * 4 * 4, channels[4], activation="fused_lrelu"),
-             EqualLinear(channels[4], 1),
-         )
-
-     def forward(self, input):
-         out = self.convs(input)
-
-         batch, channel, height, width = out.shape
-         group = min(batch, self.stddev_group)
-         stddev = out.view(
-             group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
-         )
-         stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
-         stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
-         stddev = stddev.repeat(group, 1, height, width)
-         out = torch.cat([out, stddev], 1)
-
-         out = self.final_conv(out)
-
-         out = out.view(batch, -1)
-         out = self.final_linear(out)
-
-         return out
spaces/Detomo/ai-avatar-frontend/Dockerfile DELETED
@@ -1,20 +0,0 @@
- # Use the latest Node.js image
- FROM node:latest
-
- # Set the working directory inside the container
- WORKDIR /app
-
- # Copy package.json and yarn.lock into the container
- COPY package.json yarn.lock ./
-
- # Install dependencies with Yarn
- RUN yarn install
-
- # Copy the full source code and remaining files into the container
- COPY . .
-
- # Expose the port the app runs on (e.g. 3000)
- EXPOSE 3000
-
- # Start the app when the container launches
- CMD ["yarn", "start"]
spaces/DonDoesStuff/sd_xl_base_0.9/README.md DELETED
@@ -1,19 +0,0 @@
- ---
- title: DreamlikeArt-PhotoReal 2.0
- emoji: 🧘🏻‍♀️
- colorFrom: blue
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.16.1
- app_file: app.py
- pinned: false
- duplicated_from: phenomenon1981/DreamlikeArt-PhotoReal-2.0
- ---
- ---
- title: DreamlikeArt-PhotoReal 2.0
- emoji: 🧘🏻‍♀️
- colorFrom: blue
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.16.1
- app_file: app.py
spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GetCode.py DELETED
@@ -1,232 +0,0 @@
- import os
- import pickle
- import numpy as np
- from dnnlib import tflib
- import tensorflow as tf
-
- import argparse
-
- def LoadModel(dataset_name):
-     # Initialize TensorFlow.
-     tflib.init_tf()
-     model_path='./model/'
-     model_name=dataset_name+'.pkl'
-
-     tmp=os.path.join(model_path,model_name)
-     with open(tmp, 'rb') as f:
-         _, _, Gs = pickle.load(f)
-     return Gs
-
- def lerp(a,b,t):
-     return a + (b - a) * t
-
- # stylegan-ada
- def SelectName(layer_name,suffix):
-     if suffix==None:
-         tmp1='add:0' in layer_name
-         tmp2='shape=(?,' in layer_name
-         tmp4='G_synthesis_1' in layer_name
-         tmp= tmp1 and tmp2 and tmp4
-     else:
-         tmp1=('/Conv0_up'+suffix) in layer_name
-         tmp2=('/Conv1'+suffix) in layer_name
-         tmp3=('4x4/Conv'+suffix) in layer_name
-         tmp4='G_synthesis_1' in layer_name
-         tmp5=('/ToRGB'+suffix) in layer_name
-         tmp= (tmp1 or tmp2 or tmp3 or tmp5) and tmp4
-     return tmp
-
-
- def GetSNames(suffix):
-     # get style tensor name
-     with tf.Session() as sess:
-         op = sess.graph.get_operations()
-         layers=[m.values() for m in op]
-
-     select_layers=[]
-     for layer in layers:
-         layer_name=str(layer)
-         if SelectName(layer_name,suffix):
-             select_layers.append(layer[0])
-     return select_layers
-
- def SelectName2(layer_name):
-     tmp1='mod_bias' in layer_name
-     tmp2='mod_weight' in layer_name
-     tmp3='ToRGB' in layer_name
-
-     tmp= (tmp1 or tmp2) and (not tmp3)
-     return tmp
-
- def GetKName(Gs):
-
-     layers=[var for name, var in Gs.components.synthesis.vars.items()]
-
-     select_layers=[]
-     for layer in layers:
-         layer_name=str(layer)
-         if SelectName2(layer_name):
-             select_layers.append(layer)
-     return select_layers
-
- def GetCode(Gs,random_state,num_img,num_once,dataset_name):
-     rnd = np.random.RandomState(random_state)  # 5
-
-     truncation_psi=0.7
-     truncation_cutoff=8
-
-     dlatent_avg=Gs.get_var('dlatent_avg')
-
-     dlatents=np.zeros((num_img,512),dtype='float32')
-     for i in range(int(num_img/num_once)):
-         src_latents = rnd.randn(num_once, Gs.input_shape[1])
-         src_dlatents = Gs.components.mapping.run(src_latents, None)  # [seed, layer, component]
-
-         # Apply truncation trick.
-         if truncation_psi is not None and truncation_cutoff is not None:
-             layer_idx = np.arange(src_dlatents.shape[1])[np.newaxis, :, np.newaxis]
-             ones = np.ones(layer_idx.shape, dtype=np.float32)
-             coefs = np.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones)
-             src_dlatents_np=lerp(dlatent_avg, src_dlatents, coefs)
-             src_dlatents=src_dlatents_np[:,0,:].astype('float32')
-         dlatents[(i*num_once):((i+1)*num_once),:]=src_dlatents
-     print('get all z and w')
-
-     tmp='./npy/'+dataset_name+'/W'
-     np.save(tmp,dlatents)
-
-
- def GetImg(Gs,num_img,num_once,dataset_name,save_name='images'):
-     print('Generate Image')
-     tmp='./npy/'+dataset_name+'/W.npy'
-     dlatents=np.load(tmp)
-     fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
-
-     all_images=[]
-     for i in range(int(num_img/num_once)):
-         print(i)
-         images=[]
-         for k in range(num_once):
-             tmp=dlatents[i*num_once+k]
-             tmp=tmp[None,None,:]
-             tmp=np.tile(tmp,(1,Gs.components.synthesis.input_shape[1],1))
-             image2= Gs.components.synthesis.run(tmp, randomize_noise=False, output_transform=fmt)
-             images.append(image2)
-
-         images=np.concatenate(images)
-
-         all_images.append(images)
-
-     all_images=np.concatenate(all_images)
-
-     tmp='./npy/'+dataset_name+'/'+save_name
-     np.save(tmp,all_images)
-
- def GetS(dataset_name,num_img):
-     print('Generate S')
-     tmp='./npy/'+dataset_name+'/W.npy'
-     dlatents=np.load(tmp)[:num_img]
-
-     with tf.Session() as sess:
-         init = tf.global_variables_initializer()
-         sess.run(init)
-
-         Gs=LoadModel(dataset_name)
-         Gs.print_layers()  # for ada
-         select_layers1=GetSNames(suffix=None)  # None,'/mul_1:0','/mod_weight/read:0','/MatMul:0'
-         dlatents=dlatents[:,None,:]
-         dlatents=np.tile(dlatents,(1,Gs.components.synthesis.input_shape[1],1))
-
-         all_s = sess.run(
-             select_layers1,
-             feed_dict={'G_synthesis_1/dlatents_in:0': dlatents})
-
-     layer_names=[layer.name for layer in select_layers1]
-     save_tmp=[layer_names,all_s]
-     return save_tmp
-
-
- def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False):
-     """Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
-     Can be used as an output transformation for Network.run().
-     """
-     if nchw_to_nhwc:
-         images = np.transpose(images, [0, 2, 3, 1])
-
-     scale = 255 / (drange[1] - drange[0])
-     images = images * scale + (0.5 - drange[0] * scale)
-
-     np.clip(images, 0, 255, out=images)
-     images=images.astype('uint8')
-     return images
-
-
- def GetCodeMS(dlatents):
-     m=[]
-     std=[]
-     for i in range(len(dlatents)):
-         tmp= dlatents[i]
-         tmp_mean=tmp.mean(axis=0)
-         tmp_std=tmp.std(axis=0)
-         m.append(tmp_mean)
-         std.append(tmp_std)
-     return m,std
-
-
- #%%
- if __name__ == "__main__":
-
-     parser = argparse.ArgumentParser(description='Process some integers.')
-
-     parser.add_argument('--dataset_name',type=str,default='ffhq',
-                         help='name of dataset, for example, ffhq')
-     parser.add_argument('--code_type',choices=['w','s','s_mean_std'],default='w')
-
-     args = parser.parse_args()
-     random_state=5
-     num_img=100_000
-     num_once=1_000
-     dataset_name=args.dataset_name
-
-     if not os.path.isfile('./model/'+dataset_name+'.pkl'):
-         url='https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/'
-         name='stylegan2-'+dataset_name+'-config-f.pkl'
-         os.system('wget ' +url+name + ' -P ./model/')
-         os.system('mv ./model/'+name+' ./model/'+dataset_name+'.pkl')
-
-     if not os.path.isdir('./npy/'+dataset_name):
-         os.system('mkdir ./npy/'+dataset_name)
-
-     if args.code_type=='w':
-         Gs=LoadModel(dataset_name=dataset_name)
-         GetCode(Gs,random_state,num_img,num_once,dataset_name)
-         # GetImg(Gs,num_img=num_img,num_once=num_once,dataset_name=dataset_name,save_name='images_100K')  # no need
-     elif args.code_type=='s':
-         save_name='S'
-         save_tmp=GetS(dataset_name,num_img=2_000)
-         tmp='./npy/'+dataset_name+'/'+save_name
-         with open(tmp, "wb") as fp:
-             pickle.dump(save_tmp, fp)
-
-     elif args.code_type=='s_mean_std':
-         save_tmp=GetS(dataset_name,num_img=num_img)
-         dlatents=save_tmp[1]
-         m,std=GetCodeMS(dlatents)
-         save_tmp=[m,std]
-         save_name='S_mean_std'
-         tmp='./npy/'+dataset_name+'/'+save_name
-         with open(tmp, "wb") as fp:
-             pickle.dump(save_tmp, fp)