Commit · c907fcf
1 Parent(s): 22076f7
Update parquet files (step 28 of 121)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/101-5/gpt4free/g4f/.v1/testing/writesonic_test.py +0 -35
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discover Colombia with a Free and Open-Source Map for Your Garmin Nuvi with an Unlocked Crack.md +0 -93
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Treasure Planet Training Academy PC 2007 Learn to Fly and Fight in the Galaxy.md +0 -161
- spaces/1gistliPinn/ChatGPT4/Examples/AstrovisionlifesignsoftwarewithFREE Crack.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download All Episodes of Kota Factory Season 2 for Free Heres How.md +0 -97
- spaces/1phancelerku/anime-remove-background/ARK Survival Evolved APK - Explore All Versions of the Game.md +0 -126
- spaces/1phancelerku/anime-remove-background/Bed Wars MOD APK 2023 Enjoy the Ultimate Blockman GO Experience.md +0 -103
- spaces/1phancelerku/anime-remove-background/Download Rope Hero Mafia City Wars and Use Your Superpowers to Fight Crime.md +0 -207
- spaces/1phancelerku/anime-remove-background/FNaF x Brawl Stars Download the APK and Join the Fun.md +0 -125
- spaces/232labs/VToonify/vtoonify/model/bisenet/resnet.py +0 -109
- spaces/44ov41za8i/FreeVC/speaker_encoder/preprocess.py +0 -285
- spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py +0 -118
- spaces/801artistry/RVC801/infer/modules/train/extract_feature_print.py +0 -137
- spaces/A00001/bingothoo/src/lib/bots/bing/index.ts +0 -426
- spaces/AB-TW/team-ai/promopts.py +0 -57
- spaces/AIFILMS/StyleGANEX/datasets/ffhq_degradation_dataset.py +0 -235
- spaces/AIZero2HeroBootcamp/FastSpeech2LinerGradioApp/README.md +0 -13
- spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/__init__.py +0 -20
- spaces/Abhaykoul/Palm-2/README.md +0 -12
- spaces/Adapter/T2I-Adapter/dist_util.py +0 -91
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/Factory.d.ts +0 -5
- spaces/AlekseyKorshuk/thin-plate-spline-motion-model/train_avd.py +0 -91
- spaces/AlexWang/lama/saicinpainting/training/losses/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py +0 -7
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/parrots_jit.py +0 -41
- spaces/Apex-X/ROOPOK/roop/face_analyser.py +0 -54
- spaces/ArkanDash/rvc-models-new/lib/infer_pack/attentions.py +0 -417
- spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/unet_blocks.py +0 -588
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/models.py +0 -39
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/before_sleep.py +0 -71
- spaces/Autopixel/blurry-faces/README.md +0 -47
- spaces/Awesimo/jojogan/e4e/editings/sefa.py +0 -46
- spaces/BAAI/AltDiffusion/style.css +0 -81
- spaces/Bart92/RVC_HF/julius/resample.py +0 -216
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/__main__.py +0 -274
- spaces/CVPR/LIVE/model_download/yolov5_model_p5_all.sh +0 -8
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/merge.h +0 -1018
- spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/adjacent_difference.h +0 -74
- spaces/CVPR/regionclip-demo/detectron2/layers/aspp.py +0 -144
- spaces/CXD200/QSign/Dockerfile +0 -15
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/MpoImagePlugin.py +0 -197
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/pytest_plugin.py +0 -142
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dotenv/ipython.py +0 -39
- spaces/Dagfinn1962/stablediffusion-models/images.py +0 -22
- spaces/DaleChen/AutoGPT/autogpt/commands/twitter.py +0 -26
- spaces/Detomo/ai-comic-generation/src/app/layouts/new_layouts.tsx +0 -273
- spaces/Djacon/emotion_detection/static/analytics.html +0 -301
- spaces/DragGan/DragGan-Inversion/PTI/models/e4e/latent_codes_pool.py +0 -55
- spaces/Eddycrack864/Applio-Inference/julius/fftconv.py +0 -183
spaces/101-5/gpt4free/g4f/.v1/testing/writesonic_test.py
DELETED
@@ -1,35 +0,0 @@
-# import writesonic
-import writesonic
-
-# create account (3-4s)
-account = writesonic.Account.create(logging=True)
-
-# with loging:
-# 2023-04-06 21:50:25 INFO __main__ -> register success : '{"id":"51aa0809-3053-44f7-922a...' (2s)
-# 2023-04-06 21:50:25 INFO __main__ -> id : '51aa0809-3053-44f7-922a-2b85d8d07edf'
-# 2023-04-06 21:50:25 INFO __main__ -> token : 'eyJhbGciOiJIUzI1NiIsInR5cCI6Ik...'
-# 2023-04-06 21:50:28 INFO __main__ -> got key : '194158c4-d249-4be0-82c6-5049e869533c' (2s)
-
-# simple completion
-response = writesonic.Completion.create(api_key=account.key, prompt='hello world')
-
-print(response.completion.choices[0].text) # Hello! How may I assist you today?
-
-# conversation
-
-response = writesonic.Completion.create(
-    api_key=account.key,
-    prompt='what is my name ?',
-    enable_memory=True,
-    history_data=[{'is_sent': True, 'message': 'my name is Tekky'}, {'is_sent': False, 'message': 'hello Tekky'}],
-)
-
-print(response.completion.choices[0].text) # Your name is Tekky.
-
-# enable internet
-
-response = writesonic.Completion.create(
-    api_key=account.key, prompt='who won the quatar world cup ?', enable_google_results=True
-)
-
-print(response.completion.choices[0].text) # Argentina won the 2022 FIFA World Cup tournament held in Qatar ...
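The deleted script above is a bare smoke test for the unofficial Writesonic wrapper in g4f's .v1 testing folder. Below is a minimal sketch of how such a check could be kept as a guarded pytest case; it reuses the Account.create / Completion.create interface shown in the diff, assumes a project-specific `network` marker, and is not part of this commit.

# Hypothetical sketch, not part of this commit: the same smoke test expressed as a
# pytest case that is skipped cleanly when the unofficial wrapper is not installed.
import pytest

writesonic = pytest.importorskip("writesonic")  # skip the whole module if the package is absent


@pytest.mark.network  # assumed project-specific marker for tests needing live network access
def test_simple_completion():
    # Same interface as the deleted script; Account.create registers against the live service.
    account = writesonic.Account.create(logging=True)
    response = writesonic.Completion.create(api_key=account.key, prompt="hello world")
    assert response.completion.choices[0].text  # any non-empty reply counts as success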
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discover Colombia with a Free and Open-Source Map for Your Garmin Nuvi with an Unlocked Crack.md
DELETED
@@ -1,93 +0,0 @@
-<br />
-<h1>Mapa De Colombia Para Garmin Nuvi, Unlocked Crack</h1>
-<p>Do you own a Garmin Nuvi GPS device and you want to explore the beautiful country of Colombia? If so, you might be interested in getting a map of Colombia for your device. But where can you find one? And how can you install it?</p>
-<p>In this article, I will show you how to download and install a map of Colombia for your Garmin Nuvi device with an unlocked crack. This means that you can use the map without paying any fees or registering any accounts. You will also learn about the benefits and drawbacks of using this map, and some frequently asked questions that you might have.</p>
-<h2>Mapa De Colombia Para Garmin Nuvi, Unlocked Crack</h2><br /><p><b><b>DOWNLOAD</b> ✶✶✶ <a href="https://byltly.com/2uKxv0">https://byltly.com/2uKxv0</a></b></p><br /><br />
-<h2>How to download Mapa De Colombia Para Garmin Nuvi, Unlocked Crack</h2>
-<p>The first step is to find a reliable source of the map file. There are many websites that offer free maps for Garmin devices, but not all of them are trustworthy or updated. Some of them may contain viruses or malware that can harm your computer or your device. Some of them may also have incomplete or inaccurate data that can affect your navigation.</p>
-<p>One of the sources that I recommend is <strong></strong>, which is a YouTube video that shows you how to install a map of Colombia on your GPS device. The video also provides a link to download the map file from a Google Drive folder. The map file is called <code>gmapsupp.img</code> and it contains the map of Colombia updated to September 2012, with all the cities and municipalities of the country.</p>
-<p>Mapa Colombia Garmin Nuvi Gratis Descargar<br />
-Como Instalar Mapa De Colombia En Garmin Nuvi<br />
-Mapa De Colombia Actualizado Para Garmin Nuvi<br />
-Mapa De Colombia Para GPS Garmin Nuvi<br />
-Mapa De Colombia Para Navegador Garmin Nuvi<br />
-Mapa De Colombia Para Garmin Nuvi 205w<br />
-Mapa De Colombia Para Garmin Nuvi 1300<br />
-Mapa De Colombia Para Garmin Nuvi 40<br />
-Mapa De Colombia Para Garmin Nuvi 50<br />
-Mapa De Colombia Para Garmin Nuvi 255w<br />
-Mapa De Colombia Para Garmin Nuvi 265w<br />
-Mapa De Colombia Para Garmin Nuvi 1490t<br />
-Mapa De Colombia Para Garmin Nuvi 2595lmt<br />
-Mapa De Colombia Para Garmin Nuvi 3790t<br />
-Mapa De Colombia Para Garmin Nuvi 2460lmt<br />
-Mapa De Colombia Para Garmin Nuvi 3597lmthd<br />
-Mapa De Colombia Para Garmin Nuvi 55lm<br />
-Mapa De Colombia Para Garmin Nuvi 65lm<br />
-Mapa De Colombia Para Garmin Nuvi 67lm<br />
-Mapa De Colombia Para Garmin Nuvi 68lm<br />
-Mapa De Colombia Para Garmin Nuvi 52lm<br />
-Mapa De Colombia Para Garmin Nuvi 58lm<br />
-Mapa De Colombia Para Garmin Nuvi 2539lmt<br />
-Mapa De Colombia Para Garmin Nuvi 2559lmt<br />
-Mapa De Colombia Para Garmin Nuvi 2589lmt<br />
-Mapa De Colombia Para Garmin Nuvi 2599lmthd<br />
-Mapa De Colombia Para Garmin Nuvi 2639lmt<br />
-Mapa De Colombia Para Garmin Nuvi 2689lmt<br />
-Mapa De Colombia Para Garmin Nuvi 2789lmt<br />
-Mapa De Colombia Para Garmin Nuvi 2797lmt<br />
-Mapa De Colombia Para Garmin Nuvi 55lmt<br />
-Mapa De Colombia Para Garmin Nuvi 65lmt<br />
-Mapa De Colombia Para Garmin Nuvi 66lmt<br />
-Mapa De Colombia Para Garmin Nuvi 2457lmt<br />
-Mapa De Colombia Para Garmin Nuvi 2497lmt<br />
-Mapa De Colombia Para Garmin Nuvi 2557lmt<br />
-Mapa De Colombia Para Garmin Nuvi 2577lt<br />
-Mapa De Colombia Para Garmin Nuvi 2597lmt<br />
-Descargar Crack Unlocked Del Mapa De Colombia Para Garmin Nuvi <br />
-Crack Unlocked Del Mapa Actualizado De Colombia Para GPS Garmin <br />
-Crack Unlocked Del Ultimo Mapa Disponible De Colombia Para Navegador <br />
-Crack Unlocked Del Mejor Mapa Detallado De Colombia Para Dispositivo <br />
-Crack Unlocked Del Nuevo Mapa Oficial De Colombia Para Equipo <br />
-Crack Unlocked Del Original Y Completo Mapa Topografico De <br />
-Crack Unlocked Del Fiable Y Seguro Mapa Ruteable De Colombia </p>
-<p>The second step is to download the map file to your computer. To do this, you need to click on the link provided in the video description or in the comments section. This will take you to a Google Drive folder where you can see the <code>gmapsupp.img</code> file. You need to right-click on the file and select <code>Download</code>. The file size is about 83 MB, so it may take some time depending on your internet speed.</p>
-<p>The third step is to extract the map file from the zip archive. When you download the file, it will be compressed in a zip format. You need to unzip it using a software like WinRAR or 7-Zip. You can do this by right-clicking on the file and selecting <code>Extract here</code> or <code>Extract to gmapsupp/</code>. This will create a folder called <code>gmapsupp</code> where you can find the <code>gmapsupp.img</code> file.</p>
-<h2>How to install Mapa De Colombia Para Garmin Nuvi, Unlocked Crack</h2>
-<p>The fourth step is to connect your Garmin Nuvi device to your computer. To do this, you need a USB cable that is compatible with your device. You need to plug one end of the cable into your device and the other end into your computer's USB port. Your computer should recognize your device as a removable drive.</p>
-<p>The fifth step is to copy the map file to your Garmin Nuvi device. To do this, you need to open the folder where you extracted the <code>gmapsupp.img</code> file. You need to select the file and copy it by pressing <code>Ctrl+C</code> on your keyboard. Then, you need to open the folder where your device is located. You can do this by clicking on <code>This PC</code> or <code>My Computer</code> and then clicking on the drive letter that corresponds to your device. It should be something like <code>(E:)</code> or <code>(F:)</code>. You need to paste the file by pressing <code>Ctrl+V</code> on your keyboard.</p>
-<p>The sixth step is to disconnect your Garmin Nuvi device from your computer. To do this, you need to click on the <code>Safely Remove Hardware and Eject Media</code> icon on your taskbar. It should be something like a USB icon with a green check mark. You need to click on it and select <code>Eject (E:) GARMIN</code> or <code>Eject (F:) GARMIN</code>. This will safely remove your device from your computer.</p>
-<h2>How to use Mapa De Colombia Para Garmin Nuvi, Unlocked Crack</h2>
-<p>The seventh step is to turn on your Garmin Nuvi device and select the map of Colombia. To do this, you need to press and hold the power button on your device until it turns on. Then, you need to tap on <code>Tools</code>, then on <code>Settings</code>, then on <code>Map & Vehicle</code>, then on <code>myMaps</code>. You should see a list of maps that are available on your device. You need to check the box next to <code>Pamacol v26-2018-10-01 (2018)</code>, which is the name of the map of Colombia that you downloaded and installed. You also need to uncheck any other maps that are not relevant for your destination.</p>
-<p>The eighth step is to enjoy exploring Colombia with your Garmin Nuvi device. To do this, you need to tap on <code>Where To?</code>, then on <code>Browse Map</code>. You should see a map of Colombia on your screen. You can zoom in and out by pinching or spreading two fingers on the screen. You can pan around by dragging one finger on the screen. You can search for a specific location by tapping on <code>Magnifying Glass</code>, then typing in a name or address.</p>
-<h2>Benefits of using Mapa De Colombia Para Garmin Nuvi, Unlocked Crack</h2>
-<ul>
-<li><strong>You can navigate through all the cities and municipalities of Colombia with ease.</strong> The map that you downloaded and installed has been designed to be compatible with your Garmin Nuvi device, which means that it can provide you with accurate and reliable directions, voice guidance, speed limits, traffic alerts, and other features that can enhance your driving experience. The map also has a high level of detail, which means that you can see all the streets, buildings, parks, rivers, and other landmarks that can help you orient yourself and find your destination.</li>
-<li><strong>You can avoid getting lost or paying for expensive roaming charges.</strong> The map that you downloaded and installed is stored on your Garmin Nuvi device's internal memory or SD card, which means that you can use it offline without needing an internet connection or a cellular signal. This can save you from getting lost in unfamiliar places or paying for expensive roaming charges when traveling abroad. The map also has a large coverage area, which means that you can explore all the regions of Colombia without worrying about missing any important areas.</li>
-</ul>
-<h2>Drawbacks of using Mapa De Colombia Para Garmin Nuvi, Unlocked Crack</h2>
-<ul>
-<li><strong>You may encounter some compatibility issues with your Garmin Nuvi device.</strong> The map that you downloaded and installed is an unlocked map, which means that it has been modified or cracked to bypass the authentication or registration process required by Garmin or the map provider. This can cause some compatibility issues with your Garmin Nuvi device, such as errors, glitches, crashes, freezes, or reduced performance. Some of these issues can be fixed by updating your device's software or firmware, but some of them may require contacting Garmin's customer support or returning your device for repair or replacement.</li>
-<li><strong>You may violate some copyright laws or terms of service by using an unlocked map.</strong> The map that you downloaded and installed is an unlocked map, which means that it has been obtained illegally or without permission from Garmin or the map provider. This can violate some copyright laws or terms of service that protect the intellectual property rights of the original creators or owners of the map. This can expose you to some legal risks or consequences, such as fines, lawsuits, or criminal charges. You may also lose your warranty or support from Garmin or the map provider if they find out that you are using an unlocked map.</li>
-<li><strong>You may not get any technical support or warranty from Garmin or the map provider.</strong> The map that you downloaded and installed is an unlocked map, which means that it is not an official or authorized product from Garmin or the map provider. This means that you may not get any technical support or warranty from them if you encounter any problems or issues with the map. You may also not get any updates or improvements for the map if they release new versions or features. You may have to rely on third-party sources or forums for help or information about the map.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>In conclusion, Mapa De Colombia Para Garmin Nuvi, Unlocked Crack is a map of Colombia for your Garmin Nuvi device that you can download and install for free without needing any accounts or fees. It has many benefits, such as accessing the most updated and detailed map of Colombia, navigating through all the cities and municipalities of Colombia with ease, and avoiding getting lost or paying for expensive roaming charges. However, it also has some drawbacks, such as encountering some compatibility issues with your Garmin Nuvi device, violating some copyright laws or terms of service by using an unlocked map, and not getting any technical support or warranty from Garmin or the map provider.</p>
-<p>If you are interested in trying out Mapa De Colombia Para Garmin Nuvi, Unlocked Crack, you can follow the steps that I have shown you in this article. However, you should also be aware of the risks and consequences that come with using an unlocked map. You should also respect the rights and interests of Garmin and the map provider who created and own the original map.</p>
-<p>If you have any questions or comments about Mapa De Colombia Para Garmin Nuvi, Unlocked Crack, feel free to contact me at <code>[email protected]</code>. I would love to hear from you and help you with your content needs.</p>
-<h2>FAQs</h2>
-<ol>
-<li><strong>What is Garmin Nuvi?</strong></li>
-<p>Garmin Nuvi is a series of GPS devices designed for personal navigation in cars. It has a touchscreen interface that allows users to enter destinations, view maps, get directions, receive voice guidance, and access other features. It also has a built-in speaker and microphone that enable hands-free calling and voice control. Some models also have additional features such as Bluetooth connectivity, traffic updates, lane assist, speed limit display, and more.</p>
-<li><strong>What is an unlocked map?</strong></li>
-<p>An unlocked map is a map file that has been modified or cracked to bypass the authentication or registration process required by Garmin or the map provider. This allows users to use the map without paying any fees or creating any accounts. However, this also violates some copyright laws or terms of service that protect the intellectual property rights of the original creators or owners of the map.</p>
-<li><strong>How can I update Mapa De Colombia Para Garmin Nuvi, Unlocked Crack?</strong></li>
-<p>You can update Mapa De Colombia Para Garmin Nuvi, Unlocked Crack by downloading and installing a newer version of the map file from a reliable source. However, you should be careful about where you get the update from because some sources may contain viruses or malware that can harm your computer or your device. You should also backup your current map file before updating it in case something goes wrong.</p>
-<li><strong>Is Mapa De Colombia Para Garmin Nuvi, Unlocked Crack safe to use?</strong></li>
-legal risks or consequences, and lack of technical support or warranty from Garmin or the map provider. You should be aware of these risks and consequences before using Mapa De Colombia Para Garmin Nuvi, Unlocked Crack.</p>
-<li><strong>Where can I find more maps for my Garmin Nuvi device?</strong></li>
-<p>You can find more maps for your Garmin Nuvi device on Garmin's official website or on other websites that offer free or paid maps for Garmin devices. However, you should always check the compatibility and quality of the maps before downloading and installing them on your device. You should also follow the instructions and terms of service of the map providers when using their maps.</p>
-</ol>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Treasure Planet Training Academy PC 2007 Learn to Fly and Fight in the Galaxy.md
DELETED
@@ -1,161 +0,0 @@
-<br />
-<h1>Download Treasure Planet Training Academy PC 2007</h1>
-<p>Do you love Disney's Treasure Planet? Do you want to experience the thrill of sailing through space, fighting pirates and rescuing friends? If so, you should download Treasure Planet Training Academy PC 2007, a collection of three games that will take you on an amazing adventure across the galaxy. In this article, we will tell you everything you need to know about this game, including what it is, what are the three games in it, and how to download it.</p>
-<h2>What is Treasure Planet Training Academy?</h2>
-<p>Treasure Planet Training Academy is a PC game that was released in 2002 by Disney Interactive. It is based on the animated movie Treasure Planet, which tells the story of Jim Hawkins, a young boy who joins a crew of space pirates in search of a legendary treasure. The game was distributed by McDonald's Mighty Kid's Meals as a promotional item, along with other Treasure Planet related games and toys.</p>
-<h2>Download Treasure Planet Training Academy PC 2007</h2><br /><p><b><b>Download</b> ★ <a href="https://byltly.com/2uKveg">https://byltly.com/2uKveg</a></b></p><br /><br />
-<p>Treasure Planet Training Academy consists of three games that let you explore different aspects of the movie's universe. Each game has its own gameplay, graphics and sound effects that match the style and tone of the movie. You can play them in any order you want, or switch between them as you like. The games are:</p>
-<h3>A collection of three games based on the Disney movie</h3>
-<ul>
-<li>Treasure Planet: Broadside Blast</li>
-<li>Treasure Planet: Treasure Racer</li>
-<li>Treasure Planet: Etherium Rescue</li>
-</ul>
-<h3>A promotional item distributed by McDonald's Mighty Kid's Meals</h3>
-<p>The game was part of a marketing campaign by McDonald's to promote the movie and its merchandise. It was given away for free with every purchase of a Mighty Kid's Meal, which was a special menu for children that included a burger, fries, drink and a toy. The game came in a CD-ROM that also contained previews and trailers for other Disney games and movies.</p>
-<h3>A fun and adventurous way to explore the galaxy</h3>
-<p>The game is designed to appeal to fans of the movie and anyone who enjoys sci-fi and fantasy genres. It offers a variety of challenges and rewards that will keep you entertained for hours. You can also learn more about the characters, locations and lore of Treasure Planet as you play. The game is suitable for players of all ages and skill levels.</p>
-<h2>What are the three games in Treasure Planet Training Academy?</h2>
-<p>As we mentioned before, Treasure Planet Training Academy has three games that let you experience different aspects of the movie's universe. Each game has its own gameplay, graphics and sound effects that match the style and tone of the movie. Here is a brief overview of each game:</p>
-<h3>Treasure Planet: Broadside Blast</h3>
-<p>This is a naval combat game where you take control of a Royal Navy ship and shoot enemy ships with your cannons. You can choose from different ships, each with its own speed, armor and firepower. You can also upgrade your ship with new weapons and abilities as you progress through the game.</p>
-<p>How to download Treasure Planet Training Academy for PC<br />
-Treasure Planet Training Academy PC game free download<br />
-Treasure Planet Training Academy PC 2007 full version<br />
-Treasure Planet Training Academy PC gameplay and review<br />
-Treasure Planet Training Academy PC system requirements<br />
-Treasure Planet Training Academy PC cheats and tips<br />
-Treasure Planet Training Academy PC download link<br />
-Treasure Planet Training Academy PC torrent download<br />
-Treasure Planet Training Academy PC crack download<br />
-Treasure Planet Training Academy PC iso download<br />
-Treasure Planet Training Academy PC rar download<br />
-Treasure Planet Training Academy PC zip download<br />
-Treasure Planet Training Academy PC online play<br />
-Treasure Planet Training Academy PC multiplayer mode<br />
-Treasure Planet Training Academy PC patch download<br />
-Treasure Planet Training Academy PC update download<br />
-Treasure Planet Training Academy PC mods download<br />
-Treasure Planet Training Academy PC trainer download<br />
-Treasure Planet Training Academy PC save game download<br />
-Treasure Planet Training Academy PC walkthrough and guide<br />
-Treasure Planet Training Academy PC best settings<br />
-Treasure Planet Training Academy PC keyboard controls<br />
-Treasure Planet Training Academy PC mouse controls<br />
-Treasure Planet Training Academy PC graphics settings<br />
-Treasure Planet Training Academy PC sound settings<br />
-Treasure Planet Training Academy PC video settings<br />
-Treasure Planet Training Academy PC screen resolution settings<br />
-Treasure Planet Training Academy PC windowed mode settings<br />
-Treasure Planet Training Academy PC full screen mode settings<br />
-Treasure Planet Training Academy PC compatibility mode settings<br />
-Treasure Planet Training Academy PC error fix<br />
-Treasure Planet Training Academy PC black screen fix<br />
-Treasure Planet Training Academy PC crash fix<br />
-Treasure Planet Training Academy PC lag fix<br />
-Treasure Planet Training Academy PC fps fix<br />
-Treasure Planet Training Academy PC stuttering fix<br />
-Treasure Planet Training Academy PC loading fix<br />
-Treasure Planet Training Academy PC installation fix<br />
-Treasure Planet Training Academy PC uninstallation fix<br />
-Treasure Planet Training Academy PC reinstallation fix<br />
-Download Disney's Treasure Planet games for PC<br />
-Download Disney's Action Game featuring Jim Hawkins for PC<br />
-Download Disney's Etherium Rescue for PC<br />
-Download Disney's Broadside Blast for PC <br />
-Download Disney's Solar Surfer for PC <br />
-Download Disney's Planetary Pinball for PC <br />
-Download Disney's Space Race for PC <br />
-Download Disney's Map and Compass for PC <br />
-Download Disney's Goo Bouncing for PC <br />
-Download Disney's Robot Retrieval for PC</p>
-<p>The game has several levels that take place in different locations from the movie, such as Crescentia, Montressor Spaceport and Treasure Planet itself. Each level has its own objectives, enemies and hazards that you have to overcome. You can also unlock bonus levels by completing certain tasks or finding hidden items.</p>
-<p>The game features realistic physics and graphics that simulate sailing through space. You have to adjust your speed, direction and angle to hit your targets and avoid incoming fire. You can also use special maneuvers such as barrel rolls, loops and spins to dodge or attack your enemies.</p>
-<h4>A naval combat game where you shoot enemy ships</h4>
-<code><pre><img src="broadside_blast.jpg" alt="A screenshot of Broadside Blast showing a ship firing cannons at another ship"></pre></code>
-<h4>Features different levels, weapons and upgrades</h4>
-<code><pre><table> <tr> <th>Level</th> <th>Objective</th> <th>Enemy</th> <th>Hazard</th> </th> <tr> <td>Crescentia</td> <td>Destroy all enemy ships</td> <td>Pirate galleons</td> <td>Asteroids</td> </tr> <tr> <td>Montressor Spaceport</td> <td>Protect Jim Hawkins from Scroop's attack</td> <td>Scroop's spider ship</td> <td>Space mines</td> </tr> <tr> <td>Treasure Planet</td> <td>Stop Captain Flint from escaping with the treasure</td> <td>Captain Flint's flagship</td> <td>Solar flares</td> </tr> </table></pre></code>
-<h4>Tips and tricks to master the game</h4>
-<ul>
-<li>Use your radar to locate your enemies and avoid surprises.</li>
-<li>Use your secondary weapons such as rockets, torpedoes and lasers for extra damage.</li>
-<li>Use your special abilities such as shields, cloaking and boosters for extra defense or speed.</li>
-<li>Collect power-ups such as health packs, ammo crates and coins for extra benefits.</li>
-<li>Aim for weak spots such as masts, sails and engines for critical hits.</li>
-<li>Watch out for environmental hazards such as asteroids, space mines and solar flares that can damage or destroy your ship.</li>
-<h3>Treasure Planet: Treasure Racer</h3>
-<p>This is a racing game where you take control of a Royal Navy ship and steer it through an intergalactic raceway. You can choose from different ships, each with its own speed, handling and design. You can also customize your ship with different colors and decals as you progress through the game.</p>
-<p>The game has several tracks that take place in different locations from the movie, such as Montressor Spaceport, Crescentia and Treasure Planet itself. Each track has its own layout, obstacles and shortcuts that you have to navigate. You can also unlock bonus tracks by completing certain tasks or finding hidden items.</p>
-<p>The game features fast-paced and exciting gameplay that simulates racing through space. You have to use your skills and reflexes to avoid crashing into walls, debris and other ships. You can also use special power-ups such as rockets, shields and boosters to gain an edge over your opponents.</p>
-<h4>A racing game where you steer a Royal Navy ship through an intergalactic raceway</h4>
-<code><pre><img src="treasure_racer.jpg" alt="A screenshot of Treasure Racer showing a ship racing through a track"></pre></code>
-<h4>Features different tracks, obstacles and power-ups</h4>
-<code><pre><table> <tr> <th>Track</th> <th>Location</th> <th>Obstacle</th> <th>Power-up</th> </tr> <tr> <td>Montressor Spaceport</td> <td>A busy space station with many ships and buildings</td> <td>Crates, barrels and signs that block your way</td> <td>Rockets that let you shoot your opponents</td> </tr> <tr> <td>Crescentia</td> <td>A planet with a crescent-shaped ring of asteroids around it</td> <td>Asteroids, comets and meteors that fly across your path</td> <td>Shields that protect you from damage</td> </tr> <tr> <td>Treasure Planet</td> <td>A planet with a hidden treasure vault inside it</td> <td>Lava flows, geysers and traps that erupt from the ground</td> <td>Boosters that increase your speed temporarily</td> </tr> </table></pre></code>
-<h4>Tips and tricks to master the game</h4>
-<ul>
-<li>Use your arrow keys or mouse to steer your ship left or right.</li>
-<li>Use your spacebar or left mouse button to activate your power-ups.</li>
-<li>Collect coins and gems along the way to increase your score and unlock new features.</li>
-<li>Look for ramps, loops and tunnels that can help you avoid obstacles or reach shortcuts.</li>
-<li>Watch out for other racers who can bump into you or use their power-ups against you.</li>
-<li>Try to finish each track in the shortest time possible to earn medals and trophies.</li>
-<h3>Treasure Planet: Etherium Rescue</h3>
-<p>This is a rescue game where you take control of a solar surfer and fly through space to save stranded crew members. You can choose from different surfers, each with its own speed, maneuverability and design. You can also upgrade your surfer with new parts and abilities as you progress through the game.</p>
-<p>The game has several missions that take place in different locations from the movie, such as Montressor Spaceport, Crescentia and Treasure Planet itself. Each mission has its own objectives, enemies and rewards that you have to complete. You can also unlock bonus missions by completing certain tasks or finding hidden items.</p>
-<p>The game features dynamic and immersive gameplay that simulates flying through space. You have to use your skills and reflexes to avoid crashing into obstacles, enemies and debris. You can also use special moves such as flips, spins and dives to dodge or attack your enemies.</p>
-<h4>A rescue game where you fly a solar surfer and save stranded crew members</h4>
-<code><pre><img src="etherium_rescue.jpg" alt="A screenshot of Etherium Rescue showing a surfer flying through space"></pre></code>
-<h4>Features different missions, hazards and rewards</h4>
-<code><pre><table> <tr> <th>Mission</th> <th>Objective</th> <th>Enemy</th> <th>Reward</th> </tr> <tr> <td>Montressor Spaceport</td> <td>Rescue 10 crew members from the space station</td> <td>Pirate fighters and drones</td> <td>A new surfer part and a coin bonus</td> </tr> <tr> <td>Crescentia</td> <td>Rescue 15 crew members from the asteroid ring</td> <td>Pirate galleons and asteroids</td> <td>A new surfer part and a gem bonus</td> </tr> <tr> <td>Treasure Planet</td> <td>Rescue 20 crew members from the planet's surface</td> <td>Captain Flint's flagship and traps</td> <td>A new surfer part and a trophy bonus</td> </tr> </table></pre></code>
-<h4>Tips and tricks to master the game</h4>
-<ul>
-<li>Use your mouse or arrow keys to move your surfer up, down, left or right.</li>
-<li>Use your spacebar or left mouse button to activate your turbo boost.</li>
-<li>Collect stars and hearts along the way to increase your score and health.</li>
-<li>Look for green arrows that indicate the location of crew members.</li>
-<li>Watch out for red arrows that indicate the location of enemies.</li>
-<li>Try to complete each mission in the shortest time possible to earn medals and trophies.</li>
-<h2>Conclusion</h2>
-<p>Treasure Planet Training Academy PC 2007 is a collection of three games that will take you on an amazing adventure across the galaxy. It is based on the animated movie Treasure Planet, which tells the story of Jim Hawkins, a young boy who joins a crew of space pirates in search of a legendary treasure. The game was distributed by McDonald's Mighty Kid's Meals as a promotional item in 2002.</p>
-<p>The game consists of three games that let you explore different aspects of the movie's universe. Each game has its own gameplay, graphics and sound effects that match the style and tone of the movie. The games are:</p>
-<ul>
-<li>Treasure Planet: Broadside Blast, a naval combat game where you shoot enemy ships with your cannons.</li>
-<li>Treasure Planet: Treasure Racer, a racing game where you steer a Royal Navy ship through an intergalactic raceway.</li>
-<li>Treasure Planet: Etherium Rescue, a rescue game where you fly a solar surfer and save stranded crew members.</li>
-</ul>
-<p>The game is compatible with most Windows operating systems up to Windows XP. However, it may not work properly on newer operating systems such as Windows 7, 8 or 10. If you have a newer operating system, you may need to use a compatibility mode or an emulator to run the game.</p>
-<p>You can download Treasure Planet Training Academy PC 2007 for free from an online source that offers it as abandonware. One of these sources is archive.org, which is a digital library that preserves various forms of media and content. You can find the game on archive.org by following this link. You can also use a software program such as Daemon Tools Lite to mount the ISO file of the game as a virtual CD-ROM on your PC.</p>
-<p>If you love Disney's Treasure Planet and want to experience the thrill of sailing through space, fighting pirates and rescuing friends, you should download Treasure Planet Training Academy PC 2007 and play it on your PC. It is a fun and adventurous way to explore the galaxy and learn more about the characters, locations and lore of Treasure Planet. It is also suitable for players of all ages and skill levels.</p>
-<p>So what are you waiting for? Download Treasure Planet Training Academy PC 2007 today and join Jim Hawkins and his friends on their quest for the greatest treasure in the universe!</p>
-<h3>A summary of the main points of the article</h3>
-<ul>
-<li>Treasure Planet Training Academy PC 2007 is a collection of three games based on the Disney movie Treasure Planet.</li>
-<li>The game was distributed by McDonald's Mighty Kid's Meals as a promotional item in 2002.</li>
-<li>The game consists of three games: Treasure Planet: Broadside Blast, Treasure Planet: Treasure Racer and Treasure Planet: Etherium Rescue.</li>
-<li>The game is compatible with most Windows operating systems up to Windows XP.</li>
-<li>You can download the game for free from archive.org and use Daemon Tools Lite to mount it as a virtual CD-ROM on your PC.</li>
-</ul>
-<h3>A call to action for the readers to download and play the game</h3>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions and answers about Treasure Planet Training Academy PC 2007:</p>
-<h3>Q: What is Treasure Planet?</h3>
-<p>A: Treasure Planet is a 2002 animated movie by Disney that is loosely based on the classic novel Treasure Island by Robert Louis Stevenson. It tells the story of Jim Hawkins, a young boy who joins a crew of space pirates in search of a legendary treasure hidden on a planet called Treasure Planet.</p>
-<h3>Q: Who are the main characters of Treasure Planet?</h3>
-<p>A: The main characters of Treasure Planet are:</p>
-<ul>
-<li>Jim Hawkins, the protagonist and a rebellious teenager who dreams of adventure.</li>
-<li>John Silver, the antagonist and a cyborg pirate who acts as Jim's mentor and friend.</li>
-<li>Captain Amelia, the captain of the Royal Navy ship RLS Legacy and a feline alien.</li>
-<li>Doctor Doppler, a friend of Jim's family and an astrophysicist who funds the expedition to Treasure Planet.</li>
-<li>Morph, a shape-shifting creature who is Silver's pet and companion.</li>
-<li>B.E.N., a robot who lives on Treasure Planet and has lost his memory.</li>
-</ul>
-<h3>Q: How many games are there in Treasure Planet Training Academy PC 2007?</h3>
-<p>A: There are three games in Treasure Planet Training Academy PC 2007: Treasure Planet: Broadside Blast, Treasure Planet: Treasure Racer and Treasure Planet: Etherium Rescue. Each game has its own gameplay, graphics and sound effects that match the style and tone of the movie.</p>
-<h3>Q: How can I download Treasure Planet Training Academy PC 2007?</h3>
-<p>A: You can download Treasure Planet Training Academy PC 2007 for free from an online source that offers it as abandonware. One of these sources is archive.org, which is a digital library that preserves various forms of media and content. You can find the game on archive.org by following this link. You can also use a software program such as Daemon Tools Lite to mount the ISO file of the game as a virtual CD-ROM on your PC.</p>
-<h3>Q: Is Treasure Planet Training Academy PC 2007 compatible with Windows 10?</h3>
-<p>A: The game is compatible with most Windows operating systems up to Windows XP. However, it may not work properly on newer operating systems such as Windows 10. If you have Windows 10, you may need to use a compatibility mode or an emulator to run the game. You can find more information on how to do that online.</p>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/AstrovisionlifesignsoftwarewithFREE Crack.md
DELETED
@@ -1,6 +0,0 @@
-
-<p>AstrologyGoals : Lien Communication 2.6.0 (email) avais. Daroun. LienVraiDise: 2.6.0 (email) Feb 26, 2020 15:30. https://coub.com/stories/2947137-astrovisionlifesignsoftwarewithcrack-yanhed Pater. </p>
-<p> difr,astrovisionlifesignsoftwarewithcrack,e https://wiki.archlinux.org/index.php/arch_sysroot bezeichnet dies aufgrund eines Projekts derselben Entwicklern, die den Austausch der alten Arch Linux / arch-sysroot-Directory auf den neuen Arx Linux / archlinux-sysroot-Unterverzeichnis angepasst haben. Dieser Artikel ist eine Zusammenstellung von Informationen über das Spezifische an archlinux-sysroot zu den Spielen und zum eigenen Adressbuch. https://wiki.archlinux.org/index.php/Archlinux-sysroot (Text) Lunedale Studios. https://coub.com/stories/2947138-best-dynamic-bone-v1-1-7-setup-free Ich hoffe, dass Sie dieses Ziel erreichten, da ich heute einen nicht ganz optimellen Patch für das Sea of Thieves-System installiert haben. Man kann es so sagen, dass dieses System in einem Zustand der »not-quite-playable« ist. Offenbar hat man die Löcher in der Komplettversion gefunden, um die die Entwickler so anzulocken waren, dieses System zu etablieren. Dies ist zumindest die erste Version dieses Patches, und es wird noch mehr kommen. Aber ich muss leider einen Eindruck vermitteln, was man von diesem Fehler erwarten würde, damit ihr mir diese Informationen nicht anbietet, veraltet oder ein schlechtes Zeichen im Spiel ist. Wenn man die patch geupdatedet hat, sollte man einfach die zwei o.oo-Versionen durchgehen (und kaufen bzw. kaufen, das ist ein win-Update). Dann sollte es natürlich funktionieren. Ich bin selber nicht schlecht mit meinem PC, aber es dauert immer weniger, ab einem bestimmten Punkt Probleme zu entdecken, wenn man mit einer mageren Konsole arbeitet. Es ist also keine gute Idee ein Update von Problemen zu entdecken, wenn man nicht mehr irgendwie das Spiel flüsterfegt (unter Windows, man kann das Einlösen mit Win+R starten).</p>
-<h2>astrovisionlifesignsoftwarewithcrack</h2><br /><p><b><b>Download File</b> ⚡ <a href="https://imgfil.com/2uy09B">https://imgfil.com/2uy09B</a></b></p><br /><br /> 899543212b<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download All Episodes of Kota Factory Season 2 for Free Heres How.md
DELETED
@@ -1,97 +0,0 @@
-<br />
-<br> - How to watch Kota Factory Season 2 on Netflix? <br> - How to download Kota Factory Season 2 from other sources? <br> - Conclusion: Why you should watch Kota Factory Season 2? | | H2: Introduction: What is Kota Factory and why is it popular? | - A brief overview of the plot, genre, and cast of Kota Factory <br> - The positive reviews and ratings of Kota Factory Season 1 <br> - The anticipation and expectations for Kota Factory Season 2 | | H2: How to watch Kota Factory Season 2 on Netflix? | - The release date and time of Kota Factory Season 2 on Netflix <br> - The subscription plans and benefits of Netflix <br> - The steps to watch Kota Factory Season 2 on Netflix | | H2: How to download Kota Factory Season 2 from other sources? | - The disclaimer and warning about the legal and ethical issues of downloading pirated content <br> - The possible risks and consequences of downloading from unverified sources <br> - The alternative ways to download Kota Factory Season 2 legally and safely | | H2: Conclusion: Why you should watch Kota Factory Season 2? | - A summary of the main points and benefits of watching Kota Factory Season 2 <br> - A call to action to watch Kota Factory Season 2 on Netflix or other legal sources <br> - A thank you note and a request for feedback | Table 2: Article with HTML formatting <h1>Kota Factory Season 2: How to Download and Watch Online</h1>
-<p>If you are a fan of Indian web series, you must have heard of Kota Factory. It is a comedy-drama series that revolves around the lives of students who aspire to crack the IIT entrance exams in the city of Kota, Rajasthan. The series showcases the struggles, challenges, and joys of these students as they prepare for their future.</p>
-<p>Kota Factory Season 1 was released in 2019 on YouTube by The Viral Fever (TVF), a popular online entertainment platform. It received rave reviews from critics and audiences alike for its realistic portrayal, relatable characters, witty dialogues, and black-and-white cinematography. It became one of the most-watched and highest-rated web series in India.</p>
-<h2>kota factory season 2 download link</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://urlin.us/2uT0S8">https://urlin.us/2uT0S8</a></b></p><br /><br />
-<p>Kota Factory Season 2 was eagerly awaited by the fans who wanted to know what happens next to the protagonist Vaibhav Pandey (played by Mayur More) and his friends. The good news is that Kota Factory Season 2 is finally here and it is streaming on Netflix since September 24, 2021. The bad news is that not everyone has access to Netflix or can afford its subscription plans.</p>
-<p>So, how can you watch or download Kota Factory Season 2 online? In this article, we will tell you everything you need to know about how to watch or download Kota Factory Season 2 legally and safely. We will also give you some reasons why you should not miss this amazing web series. So, read on and enjoy!</p>
-<h2>How to watch Kota Factory Season 2 on Netflix?</h2>
-<p>The easiest and best way to watch Kota Factory Season 2 online is to stream it on Netflix. Netflix is a global streaming service that offers a wide range of movies, shows, documentaries, and original content across various genres and languages. You can watch Netflix on any device that has an internet connection and a compatible app or browser.</p>
-<p>kota factory season 2 free download<br />
-kota factory season 2 torrent download<br />
-kota factory season 2 watch online<br />
-kota factory season 2 netflix<br />
-kota factory season 2 release date<br />
-kota factory season 2 episode 1 download<br />
-kota factory season 2 full episodes download<br />
-kota factory season 2 download filmywap<br />
-kota factory season 2 download telegram<br />
-kota factory season 2 download 480p<br />
-kota factory season 2 download 720p<br />
-kota factory season 2 download 1080p<br />
-kota factory season 2 download mp4<br />
-kota factory season 2 download mkv<br />
-kota factory season 2 download worldfree4u<br />
-kota factory season 2 download moviesflix<br />
-kota factory season 2 download filmyzilla<br />
-kota factory season 2 download tamilrockers<br />
-kota factory season 2 download movierulz<br />
-kota factory season 2 download bolly4u<br />
-kota factory season 2 download pagalworld<br />
-kota factory season 2 download quora<br />
-kota factory season 2 download reddit<br />
-kota factory season 2 download youtube<br />
-kota factory season 2 download google drive<br />
-kota factory season 2 subtitles download<br />
-kota factory season 2 english subtitles download<br />
-kota factory season 2 hindi subtitles download<br />
-kota factory season 2 trailer download<br />
-kota factory season 2 theme song download<br />
-kota factory season 2 cast and crew<br />
-kota factory season 2 review and ratings<br />
-kota factory season 2 plot and spoilers<br />
-kota factory season 2 memes and jokes<br />
-kota factory season 2 behind the scenes<br />
-how to download kota factory season 2 for free<br />
-where to watch kota factory season 2 online for free<br />
-when will kota factory season 2 be released on netflix<br />
-what is the story of kota factory season 2 <br />
-who are the actors in kota factory season 2</p>
-<p>Kota Factory Season 2 was released on Netflix on September 24, 2021 at 12:30 PM IST. It consists of five episodes, each with a duration of around 40 minutes. You can binge-watch all the episodes in one go or watch them at your own pace.</p>
-<p>To watch Kota Factory Season 2 on Netflix, you need to have a valid subscription plan. Netflix offers three plans in India: Mobile (Rs.199 per month), Basic (Rs.499 per month), and Standard (Rs.649 per month). The Mobile plan allows you to watch Netflix on one mobile or tablet device at a time in standard definition (SD). The Basic plan allows you to watch Netflix on one laptop, TV, phone, or tablet at a time in SD. The Standard plan allows you to watch Netflix on two devices at a time in high definition (HD).</p>
-<p>To watch Kota Factory Season 2 on Netflix, follow these steps:</p>
-<ol>
-<li>Create an account on Netflix if you don't have one already. You can sign up up with your email, phone number, or social media account. You can also use a free trial if you are a new user.</li>
-<li>Choose a subscription plan that suits your needs and budget. You can pay with your credit card, debit card, net banking, or UPI. You can also cancel your subscription anytime.</li>
-<li>Log in to your Netflix account and search for Kota Factory Season 2. You can also browse the categories or genres to find it.</li>
-<li>Select the episode that you want to watch and click on the play button. You can also adjust the video quality, audio language, subtitles, and playback speed according to your preference.</li>
-<li>Enjoy watching Kota Factory Season 2 on Netflix!</li>
-</ol>
-<h2>How to download Kota Factory Season 2 from other sources?</h2>
-<p>If you don't have Netflix or don't want to pay for it, you might be tempted to download Kota Factory Season 2 from other sources. However, we strongly advise you not to do so. Downloading pirated content is illegal and unethical. It violates the intellectual property rights of the creators and distributors of the content. It also harms the entertainment industry and the artists who work hard to produce quality content.</p>
-<p>Moreover, downloading Kota Factory Season 2 from unverified sources can expose you to various risks and consequences. You might end up downloading malware, viruses, spyware, or ransomware that can damage your device or steal your personal information. You might also face legal action, fines, or imprisonment if you are caught downloading or sharing pirated content.</p>
-<p>Therefore, we urge you to avoid downloading Kota Factory Season 2 from any source that is not authorized or licensed by the makers of the web series. Instead, you can use some alternative ways to download Kota Factory Season 2 legally and safely. Here are some of them:</p>
-<ul>
-<li>Use a VPN service: A VPN (virtual private network) is a software that encrypts your internet traffic and hides your IP address and location. It allows you to access geo-restricted content and bypass censorship. You can use a VPN service to watch Kota Factory Season 2 on Netflix if it is not available in your region. However, you still need to have a Netflix subscription and a reliable VPN service that works with Netflix.</li>
-<li>Use a video downloader tool: A video downloader tool is a software that allows you to download videos from various online platforms. You can use a video downloader tool to download Kota Factory Season 2 from Netflix if you have a subscription and an offline viewing option. However, you need to make sure that the video downloader tool is compatible with Netflix and does not violate its terms of service.</li>
-<li>Use a torrent site: A torrent site is a website that hosts torrent files that contain metadata about files and folders that are distributed over a peer-to-peer network. You can use a torrent site to download Kota Factory Season 2 from other users who have uploaded it. However, you need to be careful about the legality, quality, and safety of the torrent files and the torrent site. You also need to have a torrent client and a VPN service to download torrents.</li>
-</ul>
-<h2>Conclusion: Why you should watch Kota Factory Season 2?</h2>
-<p>Kota Factory Season 2 is a web series that deserves your attention and appreciation. It is a web series that tells a realistic and relatable story of students who are chasing their dreams in a competitive environment. It is a web series that features talented actors who deliver brilliant performances and memorable characters. It is a web series that has witty humor, emotional drama, inspiring messages, and stunning visuals.</p>
-<p>Kota Factory Season 2 is a web series that will make you laugh, cry, think, and feel. It will make you nostalgic about your school days, your friends, your teachers, and your aspirations. It will make you empathize with the struggles and challenges of the students who are preparing for one of the toughest exams in India. It will make you appreciate the value of education, friendship, family, and love.</p>
-<p>Kota Factory Season 2 is a web series that you should not miss if you love Indian web series or if you are looking for something fresh and original to watch online. You can watch it on Netflix with a subscription plan or download it from other legal sources with caution. You can also share it with your friends and family who might enjoy it as well.</p>
-<p>Thank you for reading this article till the end. We hope you found it helpful and informative. If you have any questions or feedback about Kota Factory Season 2 or this article, please feel free to leave them in the comments section below. We would love to hear from you!</p>
-<h3>Frequently Asked Questions</h3>
-<ol>
-<li><b>Is Kota Factory based on a true story?</b></li>
-<p>Kota Factory is not based on a true story, but it is inspired by the real-life experiences of the creators and writers of the web series. They have drawn from their own memories and observations of studying or coaching in Kota. They have also done extensive research and interviews with students, teachers, and parents to make the web series authentic and realistic.</p>
-<li><b>Who are the main actors and characters of Kota Factory?</b></li>
-<p>The main actors and characters of Kota Factory are:</p>
-<ul>
-<li>Mayur More as Vaibhav Pandey, a 16-year-old student who joins Maheshwari Classes in Kota to prepare for IIT-JEE.</li>
-<li>Jitendra Kumar as Jeetu Bhaiya, a popular and charismatic teacher at Maheshwari Classes who mentors Vaibhav and his friends.</li>
-<li>Ranjan Raj as Balmukund Meena, a loyal and optimistic friend of Vaibhav who hails from Rajasthan.</li>
-<li>Alam Khan as Uday Gupta, a witty and sarcastic friend of Vaibhav who is good at maths.</li>
-<li>Ahsaas Channa as Shivangi Ranawat, a smart and confident student at Maheshwari Classes who becomes Vaibhav's love interest.</li>
-<li>Revathi Pillai as Vartika Ratawal, a studious and ambitious student at Maheshwari Classes who competes with Vaibhav.</li>
-<li>Urvi Singh as Meenal Parekh, a rebellious and outspoken student at Maheshwari Classes who is friends with Shivangi and Vartika.</li>
-</ul>
-<li><b>What is the meaning of Kota Factory?</b></li>
-<p>Kota Factory is a term that refers to the city of Kota in Rajasthan, which is known for its numerous coaching institutes that prepare students for various competitive exams, especially IIT-JEE. The term implies that Kota is like a factory that produces engineers and doctors in large numbers. It also reflects the pressure, stress, and monotony that the students face in Kota.</p>
-<li><b>What is the theme song of Kota Factory?</b></li>
-<p>The theme song of Kota Factory is called "Yeh Meri Life Hai" (This Is My Life). It is composed by Simran Hora and Karthik Rao, and sung by Ahsaas Channa. It is a catchy and upbeat song that captures the mood and spirit of the web series. It expresses the hopes, dreams, fears, and frustrations of the students who are living in Kota.</p>
-<li><b>Will there be a Kota Factory Season 3?</b></li>
-<p>As of now, there is no official confirmation or announcement about Kota Factory Season 3. However, given the popularity and success of the web series, it is likely that there will be a third season in the future. The fans are also eagerly waiting for more episodes and stories of their favorite characters. The makers of the web series have hinted that they have some ideas and plans for the next season, but they have not revealed any details yet.</p>
-<h4></h4></p> 197e85843d<br />
-<br />
-<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/ARK Survival Evolved APK - Explore All Versions of the Game.md
DELETED
@@ -1,126 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>ARK: Survival Evolved APK All Version - A Guide for Android Users</h1>
|
3 |
-
<p>Are you a fan of adventure, survival, and dinosaurs? If yes, then you might want to try out ARK: Survival Evolved, a popular game that lets you explore, craft, and fight in a prehistoric world. But how can you play this game on your Android device? And what are the different versions of the game available? In this article, we will answer these questions and more. We will show you how to download and install ARK: Survival Evolved APK all version, and how to play and enjoy the game on your Android device.</p>
|
4 |
-
<h2>What is ARK: Survival Evolved?</h2>
|
5 |
-
<h3>A brief introduction to the game and its features</h3>
|
6 |
-
<p>ARK: Survival Evolved is a game that was released in 2017 by Studio Wildcard. It is an open-world survival game that puts you in the shoes of a stranded human on an island full of dinosaurs and other creatures. You have to gather resources, craft tools and weapons, build shelters, tame animals, and fight against enemies and environmental hazards. You can also team up with other players online or play solo offline. The game has stunning graphics, realistic physics, and dynamic weather systems that make it immersive and challenging.</p>
|
7 |
-
<h2>ark survival evolved apk all version</h2><br /><p><b><b>DOWNLOAD</b> ⚹ <a href="https://jinyurl.com/2uNMtl">https://jinyurl.com/2uNMtl</a></b></p><br /><br />
|
8 |
-
<h3>The difference between APK and XAPK files</h3>
|
9 |
-
<p>Before we show you how to download and install ARK: Survival Evolved APK all version, you need to know the difference between APK and XAPK files. APK stands for Android Package Kit, which is the standard file format for Android apps. XAPK stands for eXtended Android Package Kit, which is a compressed file that contains both the APK file and the additional data files (such as OBB files) that are required for some apps to run properly. ARK: Survival Evolved is one of those apps that need both the APK file and the OBB files to work.</p>
|
10 |
-
<h2>How to download and install ARK: Survival Evolved APK all version</h2>
|
11 |
-
<h3>The requirements and precautions for installing the game</h3>
|
12 |
-
<p>Before you download and install ARK: Survival Evolved APK all version, you need to make sure that your Android device meets the following requirements:</p>
|
13 |
-
<ul>
|
14 |
-
<li>Android 7.0 or higher</li>
|
15 |
-
<li>At least 4 GB of RAM</li>
|
16 |
-
<li>At least 2 GB of free storage space</li>
|
17 |
-
<li>A stable internet connection</li>
|
18 |
-
</ul>
|
19 |
-
<p>You also need to take some precautions before installing the game:</p>
|
20 |
-
<p>ark survival evolved mobile apk download<br />
|
21 |
-
ark survival evolved apk mod unlimited money<br />
|
22 |
-
ark survival evolved apk obb offline<br />
|
23 |
-
ark survival evolved apk latest version 2022<br />
|
24 |
-
ark survival evolved apk free full version<br />
|
25 |
-
ark survival evolved apk data highly compressed<br />
|
26 |
-
ark survival evolved apk rexdl<br />
|
27 |
-
ark survival evolved apk android 1<br />
|
28 |
-
ark survival evolved apk revdl<br />
|
29 |
-
ark survival evolved apk pure<br />
|
30 |
-
ark survival evolved apk hack<br />
|
31 |
-
ark survival evolved apk uptodown<br />
|
32 |
-
ark survival evolved apk andropalace<br />
|
33 |
-
ark survival evolved apk mirror<br />
|
34 |
-
ark survival evolved apk apkpure<br />
|
35 |
-
ark survival evolved apk for pc<br />
|
36 |
-
ark survival evolved apk no verification<br />
|
37 |
-
ark survival evolved apk old version<br />
|
38 |
-
ark survival evolved apk 2.0.28<br />
|
39 |
-
ark survival evolved apk 2.0.25<br />
|
40 |
-
ark survival evolved apk 2.0.24<br />
|
41 |
-
ark survival evolved apk 2.0.23<br />
|
42 |
-
ark survival evolved apk 2.0.22<br />
|
43 |
-
ark survival evolved apk 2.0.21<br />
|
44 |
-
ark survival evolved apk 2.0.20<br />
|
45 |
-
ark survival evolved apk 2.0.19<br />
|
46 |
-
ark survival evolved apk 2.0.18<br />
|
47 |
-
ark survival evolved apk 2.0.17<br />
|
48 |
-
ark survival evolved apk 2.0.16<br />
|
49 |
-
ark survival evolved apk 2.0.15<br />
|
50 |
-
ark survival evolved apk 2.0.14<br />
|
51 |
-
ark survival evolved apk 2.0.13<br />
|
52 |
-
ark survival evolved apk 2.0.12<br />
|
53 |
-
ark survival evolved apk 2.0.11<br />
|
54 |
-
ark survival evolved apk 2.0.10<br />
|
55 |
-
ark survival evolved apk 2.0.9<br />
|
56 |
-
ark survival evolved apk 2.0.8<br />
|
57 |
-
ark survival evolved apk 2.0.7<br />
|
58 |
-
ark survival evolved apk 2.0.6<br />
|
59 |
-
ark survival evolved apk 2.0.5<br />
|
60 |
-
ark survival evolved apk 2.0.4<br />
|
61 |
-
ark survival evolved apk 2.0.3<br />
|
62 |
-
ark survival evolved apk 2.0.2<br />
|
63 |
-
ark survival evolved apk 2.0.1<br />
|
64 |
-
ark survival evolved modded servers android download</p>
|
65 |
-
<ul>
|
66 |
-
<li>Enable unknown sources on your device settings to allow installation from third-party sources</li>
|
67 |
-
<li>Disable any antivirus or security apps that might interfere with the installation process</li>
|
68 |
-
<li>Backup your data before installing the game in case something goes wrong</li>
|
69 |
-
</ul>
|
70 |
-
<h3>The steps to download and install the game from APKCombo</h3>
|
71 |
-
<p>If you want to download and install ARK: Survival Evolved APK all version from APKCombo, a website that provides various versions of Android apps, you can follow these steps:</p>
|
72 |
-
<ol>
|
73 |
-
<li>Go to <a href="(^1^)">APKCombo</a> on your browser</li>
|
74 |
-
<li>Search for ARK: Survival Evolved in the search bar</li>
|
75 |
-
<li>Select the version that you want to download (the latest one is 2.0 a character and choosing a server to join. You can customize your character's appearance, gender, and name. You can also choose between different game modes, such as PvP (player versus player), PvE (player versus environment), or single-player. You can also adjust the difficulty level and the server settings to suit your preferences.</p>
|
76 |
-
<p>Once you enter the game, you will find yourself on a beach with nothing but a pair of underwear and an implant on your arm. You will see a HUD (head-up display) that shows your health, stamina, hunger, thirst, oxygen, weight, and temperature. You will also see a map, a compass, and a hotbar that shows your equipped items. You can use the virtual joystick on the left side of the screen to move around, and the buttons on the right side of the screen to perform actions such as jumping, crouching, punching, picking up items, accessing inventory, crafting, and using items. You can also swipe the screen to look around and pinch to zoom in or out.</p>
|
77 |
-
<h3>The tips and tricks to survive and thrive in the game</h3>
|
78 |
-
<p>ARK: Survival Evolved is a game that challenges you to use your wits and resources to survive in a hostile world. Here are some tips and tricks that can help you along the way:</p>
|
79 |
-
<ul>
|
80 |
-
<li>Gather resources as soon as possible. You will need them to craft essential items such as tools, weapons, clothing, and shelter. You can gather resources by punching trees, rocks, bushes, and corpses. You can also use tools such as axes, picks, spears, and knives to gather more efficiently.</li>
|
81 |
-
<li>Craft items as soon as possible. You will need them to protect yourself from the elements, predators, and other players. You can craft items by accessing your inventory and selecting the engrams (blueprints) that you have learned. You can learn new engrams by leveling up your character and spending points on them. Some of the items that you should craft early on are a stone hatchet, a stone pick, a spear, a torch, a campfire, a sleeping bag, and a thatch hut.</li>
|
82 |
-
<li>Level up your character as soon as possible. You will need to improve your stats and skills to survive longer and better. You can level up your character by gaining experience from various activities such as gathering resources, crafting items, killing creatures, and exploring the map. You can spend points on increasing your health, stamina, oxygen, food, water, weight, melee damage, movement speed, crafting speed, fortitude, or torpidity. You can also learn new engrams to craft more advanced items.</li>
|
83 |
-
<li>Tame creatures as soon as possible. You will need them to assist you in combat, transportation, and resource gathering. You can tame creatures by knocking them out with blunt weapons or tranquilizers, and feeding them their preferred food until they become loyal to you. You can also breed creatures to produce offspring with better stats and traits. Some of the creatures that you should tame early on are a dodo, a parasaur, a raptor, a trike, and a pteranodon.</li>
|
84 |
-
<li>Explore the map as soon as possible. You will need to discover new locations, resources, and secrets to progress in the game. You can explore the map by walking, swimming, flying, or riding on your tamed creatures. You can also use a raft or a boat to travel on water. You can find various points of interest on the map, such as caves, ruins, obelisks, beacons, and boss arenas. You can also encounter various events and challenges on the map, such as weather changes, natural disasters, supply drops, and enemy raids.</li>
|
85 |
-
</ul>
|
86 |
-
<h3>The best features and modes of the game</h3>
|
87 |
-
<p>ARK: Survival Evolved is a game that offers you a lot of features and modes to enjoy. Here are some of the best ones:</p>
|
88 |
-
<ul>
|
89 |
-
<li>The multiplayer mode: You can play online with other players from around the world on various servers. You can join or create a tribe with other players to cooperate or compete with other tribes. You can also chat, trade, and ally with other players. You can also participate in various events and activities on the servers, such as raids, wars, tournaments, and quests.</li>
|
90 |
-
<li>The single-player mode: You can play offline by yourself on your own private server. You can customize the server settings to your liking, such as the difficulty level, the spawn rates, the day and night cycle, and the weather effects. You can also pause and save the game at any time. You can also use cheats and commands to modify the game to your advantage.</li>
|
91 |
-
<li>The creative mode: You can play without any restrictions or limitations on your own private server. You can access all the items and engrams in the game without any requirements or costs. You can also spawn any creature or object in the game without any limits or consequences. You can also fly around and explore the map freely. You can use this mode to build amazing structures, create epic scenarios, or just have fun.</li>
|
92 |
-
</ul>
|
93 |
-
<h2>Conclusion</h2>
|
94 |
-
<h3>A summary of the main points and a call to action</h3>
|
95 |
-
<p>ARK: Survival Evolved is a game that lets you experience an amazing adventure in a prehistoric world full of dinosaurs and other creatures. You can download and install ARK: Survival Evolved APK all version on your Android device by following the steps we have shown you in this article. You can also play and enjoy ARK: Survival Evolved on your Android device by following the tips and tricks we have shared with you in this article. ARK: Survival Evolved is a game that will keep you entertained for hours with its stunning graphics, realistic physics, dynamic weather systems, and endless possibilities. So what are you waiting for? Download ARK: Survival Evolved APK all version now and start your journey!</p>
|
96 |
-
<h2>FAQs</h2>
|
97 |
-
<p>Here are some frequently asked questions about ARK: Survival Evolved APK all version:</p>
|
98 |
-
<ol>
|
99 |
-
<li>Is ARK: Survival Evolved APK all version safe to download and install?</li>
|
100 |
-
<p>Yes, ARK: Survival Evolved APK all version is safe to download and install as long as you get it from a trusted source such as APKCombo or Google Play Store. However, you should always be careful when downloading and installing any app from third-party sources, as they might contain malware or viruses that could harm your device.</p>
|
101 |
-
<li>Is ARK: Survival Evolved APK all version free to play?</li>
|
102 |
-
<p>Yes, ARK: Survival Evolved APK all version is free to play on your Android device. However, the game does offer some in-app purchases that can enhance your gameplay experience, such as premium currency (amber), skins, items, and expansions. You can also watch ads to earn some free amber or use a modded version of the game to get unlimited amber. However, we do not recommend using any modded or hacked version of the game, as they might cause errors, crashes, or bans.</p>
|
103 |
-
<li>How can I update ARK: Survival Evolved APK all version?</li>
|
104 |
-
<p>You can update ARK: Survival Evolved APK all version by following the same steps that you used to download and install it. You can check for updates on APKCombo or Google Play Store and download and install the latest version of the game. You can also enable automatic updates on your device settings to get the updates as soon as they are available.</p>
|
105 |
-
<li>How can I fix ARK: Survival Evolved APK all version not working or crashing?</li>
|
106 |
-
<p>If you encounter any problems with ARK: Survival Evolved APK all version, such as not working, crashing, freezing, or lagging, you can try some of these solutions:</p>
|
107 |
-
<ul>
|
108 |
-
<li>Restart your device and launch the game again</li>
|
109 |
-
<li>Clear the cache and data of the game from your device settings</li>
|
110 |
-
<li>Reinstall the game from a trusted source</li>
|
111 |
-
<li>Check your internet connection and make sure it is stable and fast</li>
|
112 |
-
<li>Lower the graphics and sound settings of the game from the options menu</li>
|
113 |
-
<li>Contact the developer of the game for support or report a bug</li>
|
114 |
-
</ul>
|
115 |
-
<li>How can I contact the developer of ARK: Survival Evolved APK all version?</li>
|
116 |
-
<p>If you have any questions, feedback, or suggestions for ARK: Survival Evolved APK all version, you can contact the developer of the game by using one of these methods:</p>
|
117 |
-
<ul>
|
118 |
-
<li>Email: [email protected]</li>
|
119 |
-
<li>Website: https://www.playark.com/</li>
|
120 |
-
<li>Facebook: https://www.facebook.com/playark/</li>
|
121 |
-
<li>Twitter: https://twitter.com/survivetheark</li>
|
122 |
-
<li>Discord: https://discord.gg/playark</li>
|
123 |
-
</ul>
|
124 |
-
</ol></p> 401be4b1e0<br />
|
125 |
-
<br />
|
126 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Bed Wars MOD APK 2023 Enjoy the Ultimate Blockman GO Experience.md
DELETED
@@ -1,103 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Bed Wars MOD APK New Version: Everything You Need to Know</h1>
|
3 |
-
<p>If you are looking for a fun and exciting multiplayer game that challenges your creativity and strategy, then you should try Bed Wars. And if you want to enjoy the game even more, then you should download the latest version of Bed Wars MOD APK. In this article, we will tell you everything you need to know about this amazing game and its modified version.</p>
|
4 |
-
<h2>bed wars mod apk new version</h2><br /><p><b><b>Download Zip</b> ⭐ <a href="https://jinyurl.com/2uNOqW">https://jinyurl.com/2uNOqW</a></b></p><br /><br />
|
5 |
-
<h2>What is Bed Wars?</h2>
|
6 |
-
<p>Bed Wars is a popular mobile game developed by Blockman GO Studio. It is a team-based game where you have to protect your bed from being destroyed by other players while trying to destroy theirs. The game has various modes, maps, and skins to choose from, making it more diverse and interesting.</p>
|
7 |
-
<h3>A fun and strategic multiplayer game</h3>
|
8 |
-
<p>Bed Wars is a game that requires both skill and strategy. You have to work with your teammates to collect resources, build defenses, craft weapons, and attack your enemies. You also have to be careful not to fall into the void or get killed by other players. The game is very engaging and addictive, as you can play with your friends or join random matches online.</p>
|
9 |
-
<h3>How to play Bed Wars</h3>
|
10 |
-
<p>The game is easy to play but hard to master. Here are the basic steps to play Bed Wars:</p>
|
11 |
-
<p>bed wars mod apk unlimited money and gcubes<br />
|
12 |
-
bed wars mod apk latest version 2023 download<br />
|
13 |
-
bed wars mod apk with aimbot and health hack<br />
|
14 |
-
bed wars mod apk free gems and coins<br />
|
15 |
-
bed wars mod apk no root and no ban<br />
|
16 |
-
bed wars mod apk offline and online mode<br />
|
17 |
-
bed wars mod apk for android and ios devices<br />
|
18 |
-
bed wars mod apk unlimited resources and weapons<br />
|
19 |
-
bed wars mod apk new maps and modes<br />
|
20 |
-
bed wars mod apk best strategy and tips<br />
|
21 |
-
bed wars mod apk how to install and play<br />
|
22 |
-
bed wars mod apk review and rating<br />
|
23 |
-
bed wars mod apk update and patch notes<br />
|
24 |
-
bed wars mod apk features and benefits<br />
|
25 |
-
bed wars mod apk download link and guide<br />
|
26 |
-
bed wars mod apk cheats and tricks<br />
|
27 |
-
bed wars mod apk gameplay and graphics<br />
|
28 |
-
bed wars mod apk multiplayer and team mode<br />
|
29 |
-
bed wars mod apk custom skins and characters<br />
|
30 |
-
bed wars mod apk fun and addictive game<br />
|
31 |
-
bed wars mod apk challenges and rewards<br />
|
32 |
-
bed wars mod apk bugs and issues fixed<br />
|
33 |
-
bed wars mod apk support and feedback<br />
|
34 |
-
bed wars mod apk comparison and alternatives<br />
|
35 |
-
bed wars mod apk pros and cons<br />
|
36 |
-
bed wars mod apk requirements and compatibility<br />
|
37 |
-
bed wars mod apk size and performance<br />
|
38 |
-
bed wars mod apk security and privacy<br />
|
39 |
-
bed wars mod apk ads and in-app purchases<br />
|
40 |
-
bed wars mod apk developer and publisher</p>
|
41 |
-
<ul>
|
42 |
-
<li>Select a mode and a map. There are different modes such as solo, duo, squad, and rush. There are also different maps such as castle, pirate ship, sky island, and more.</li>
|
43 |
-
<li>Join a team. You can either create your own team or join an existing one. You can also play solo if you prefer.</li>
|
44 |
-
<li>Protect your bed. Your bed is your respawn point. If your bed is destroyed, you will not be able to respawn if you die. You can use various materials such as wool, wood, stone, iron, and gold to build defenses around your bed.</li>
|
45 |
-
<li>Collect resources. You can collect resources from the generators in your base or in the middle of the map. There are four types of resources: iron, gold, diamond, and emerald. You can use them to buy items from the shop such as blocks, weapons, armor, tools, potions, and more.</li>
|
46 |
-
<li>Destroy other beds. You can use tools such as pickaxes, axes, shears, and TNT to break through the defenses of other beds. You can also use bows, fireballs, ender pearls, and other items to attack from a distance.</li>
|
47 |
-
<li>Eliminate other players. You can use swords, axes, bows, fireballs, potions, and other items to fight other players. You can also knock them off the map or into the void.</li>
|
48 |
-
<li>Win the game. The last team or player standing wins the game.</li>
|
49 |
-
</ul>
|
50 |
-
<h2>What is Bed Wars MOD APK?</h2>
|
51 |
-
<p>Bed Wars MOD APK is a modified version of the original game that gives you some extra features and advantages that are not available in the official version. It is a free and safe way to enhance your gaming experience and have more fun.</p>
|
52 |
-
<h3>A modified version of the original game</h3>
|
53 |
-
<p>Bed Wars MOD APK is not developed by Blockman GO Studio but by independent developers who modify the original game files to create a new version. This means that it is not available on the Google Play Store or the App Store but on third-party websites. You have to download it from a reliable source and install it manually on your device.</p>
|
54 |
-
<h3>The benefits of using Bed Wars MOD APK</h3>
|
55 |
-
<p>There are many benefits of using Bed Wars MOD APK over the official version. Here are some of them:</p>
|
56 |
-
<h4 <h4>Unlimited money and gcubes</h4>
|
57 |
-
<p>One of the benefits of using Bed Wars MOD APK is that you get unlimited money and gcubes, which are the in-game currencies. You can use them to buy anything you want from the shop, such as blocks, weapons, armor, tools, potions, and more. You can also use them to unlock skins, emotes, and other cosmetics. You don't have to worry about running out of money or gcubes ever again.</p>
|
58 |
-
<h4>Unlimited health and aimbot</h4>
|
59 |
-
<p>Another benefit of using Bed Wars MOD APK is that you get unlimited health and aimbot, which are the in-game features. You can use them to survive longer and kill faster in the game. You don't have to worry about losing health or missing shots ever again. You can also use them to troll other players and make them rage quit.</p>
|
60 |
-
<h4>No ads and bugs</h4>
|
61 |
-
<p>A final benefit of using Bed Wars MOD APK is that you get no ads and bugs, which are the in-game annoyances. You don't have to watch any ads or deal with any glitches or errors in the game. You can enjoy a smooth and uninterrupted gaming experience.</p>
|
62 |
-
<h2>How to download and install Bed Wars MOD APK?</h2>
|
63 |
-
<p>If you are interested in downloading and installing Bed Wars MOD APK, then you have to follow some simple steps. However, you also have to take some precautions to avoid any problems or risks.</p>
|
64 |
-
<h3>The steps to follow</h3>
|
65 |
-
<p>Here are the steps to download and install Bed Wars MOD APK:</p>
|
66 |
-
<ol>
|
67 |
-
<li>Go to a trusted website that offers Bed Wars MOD APK. You can search for it on Google or use the link below.</li>
|
68 |
-
<li>Download the APK file to your device. Make sure you have enough storage space and a stable internet connection.</li>
|
69 |
-
<li>Enable the installation of unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and turning it on.</li>
|
70 |
-
<li>Locate the APK file on your device and tap on it to start the installation process.</li>
|
71 |
-
<li>Follow the instructions on the screen and wait for the installation to finish.</li>
|
72 |
-
<li>Launch the game and enjoy the mod features.</li>
|
73 |
-
</ol>
|
74 |
-
<h3>The precautions to take</h3>
|
75 |
-
<p>Here are some precautions to take before downloading and installing Bed Wars MOD APK:</p>
|
76 |
-
<ul>
|
77 |
-
<li>Make sure you download the APK file from a reliable and secure website. Some websites may contain viruses or malware that can harm your device or steal your data.</li>
|
78 |
-
<li>Make sure you have a backup of your original game data. Some mod versions may overwrite or delete your game data, so you may lose your progress or achievements.</li>
|
79 |
-
<li>Make sure you use a VPN or an alternative account when playing online. Some mod versions may be detected by the game servers and result in a ban or a suspension of your account.</li>
|
80 |
-
</ul>
|
81 |
-
<h2>Conclusion</h2>
|
82 |
-
<p>Bed Wars is a fun and exciting multiplayer game that challenges your creativity and strategy. Bed Wars MOD APK is a modified version of the game that gives you some extra features and advantages that are not available in the official version. You can download and install Bed Wars MOD APK by following some simple steps and taking some precautions. However, you should also be aware of the potential risks and consequences of using mod versions of games. We hope this article has helped you learn more about Bed Wars and its mod version.</p>
|
83 |
-
<h2>FAQs</h2>
|
84 |
-
<p>Here are some frequently asked questions about Bed Wars and Bed Wars MOD APK:</p>
|
85 |
-
<h4>Q: Is Bed Wars free to play?</h4>
|
86 |
-
<p>A: Yes, Bed Wars is free to play on both Android and iOS devices. However, it contains some in-app purchases that require real money.</p>
|
87 |
-
<h4>Q: Is Bed Wars MOD APK safe to use?</h4>
|
88 |
-
<p>A: It depends on where you download it from and how you use it. If you download it from a trusted website and use it with caution, then it should be safe to use. However, if you download it from an unknown source or use it recklessly, then it may pose some risks to your device or account.</p>
|
89 |
-
<h4>Q: Can I play Bed Wars with my friends?</h4>
|
90 |
-
<p>A: Yes, you can play Bed Wars with your friends online. You can either create your own team or join an existing one. You can also chat with your teammates and coordinate your actions.</p>
|
91 |
-
<h4>Q: How can I update Bed Wars MOD APK?</h4>
|
92 |
-
<p>A: You can update Bed Wars MOD APK by downloading the latest version from the same website where you downloaded it before. However, you may have to uninstall the previous version first and install the new one manually on your device. You may also have to backup your game data before updating.</p>
|
93 |
-
<h4>Q: What are some tips and tricks to play Bed Wars better?</h4>
|
94 |
-
<p>A: Here are some tips and tricks to play Bed Wars better:</p>
|
95 |
-
<ul>
|
96 |
-
<li>Upgrade your generators as soon as possible. This will help you get more resources faster and buy better items.</li>
|
97 |
-
<li>Use different types of blocks to protect your bed. This will make it harder for your enemies to break through your defenses.</li>
|
98 |
-
<li>Don't forget to buy armor and tools. This will improve your survivability and efficiency in combat.</li>
|
99 |
-
<li>Communicate and cooperate with your teammates. This will help you plan your strategies and coordinate your attacks.</li>
|
100 |
-
<li>Be aware of your surroundings. This will help you avoid traps, ambushes, and sneak attacks.</li>
|
101 |
-
</ul></p> 401be4b1e0<br />
|
102 |
-
<br />
|
103 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Rope Hero Mafia City Wars and Use Your Superpowers to Fight Crime.md
DELETED
@@ -1,207 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Rope Hero Game Download Kar Do: How to Play the Best Superhero Game on Your Device</h1>
|
3 |
-
<p>Do you like superhero games? Do you want to play a game that lets you swing like a spider, fight like a ninja, and drive like a racer? If yes, then you should try the rope hero game. The rope hero game is a 3D action-adventure game that lets you become a super rope hero in a vice city full of crime and chaos. You can use your super powers to help the police and free the city from gangsters, or you can become a villain and cause mayhem and destruction. The choice is yours.</p>
|
4 |
-
<p>In this article, we will tell you everything you need to know about the rope hero game, including what it is, why it is popular, and how to download it on different devices. We will also give you some tips and tricks on how to play the game better and have more fun. So, if you are ready to become a real super rope hero, read on.</p>
|
5 |
-
<h2>rope hero game download kar do</h2><br /><p><b><b>Download</b> ✺ <a href="https://jinyurl.com/2uNPXv">https://jinyurl.com/2uNPXv</a></b></p><br /><br />
|
6 |
-
<h2>What is the Rope Hero Game?</h2>
|
7 |
-
<p>The rope hero game is a 3D third-person shooter with RPG elements that was developed by Naxeex LLC. The game was released in 2016 for Android devices and in 2017 for iOS devices. The game has over 100 million downloads on Google Play Store and over 2 million ratings on App Store.</p>
|
8 |
-
<p>The game is set in a vice city that is controlled by various crime bosses and gangs. You play as a blue super hero who has a super rope that allows him to swing from one building to another, as well as other super powers such as mega jumps, power landings, and gravity gun. You can use your powers to fight against crime or join it.</p>
|
9 |
-
<p>The game features a huge open world that you can explore freely. You can also customize your character with different weapons, accessories, and vehicles. The game has many quests and challenges that you can complete to earn money, experience, and rewards. You can also participate in mini-games such as gangster shootouts, car races, arena battles, ATM hacking, and more <h2>Why is the Rope Hero Game Popular?</h2>
|
10 |
-
<p>The rope hero game is popular because it offers a lot of fun and excitement for the players. Here are some of the reasons why people love playing the rope hero game:</p>
|
11 |
-
<ul>
|
12 |
-
<li>The game has amazing graphics and sound effects that make you feel like you are in a real vice city.</li>
|
13 |
-
<li>The game has a realistic physics engine that allows you to swing, jump, and fly with your super rope.</li>
|
14 |
-
<li>The game has a variety of missions and activities that keep you entertained and challenged.</li>
|
15 |
-
<li>The game has a dynamic gameplay that changes according to your choices and actions.</li>
|
16 |
-
<li>The game has a humorous and witty dialogue that makes you laugh and enjoy the game more.</li>
|
17 |
-
<li>The game has a high replay value that allows you to play the game differently every time.</li>
|
18 |
-
</ul>
|
19 |
-
<p>These are just some of the reasons why the rope hero game is popular among millions of players around the world. If you want to experience the thrill and fun of being a super rope hero, you should download the game now.</p>
|
20 |
-
<h2>How to Download the Rope Hero Game on Different Devices?</h2>
|
21 |
-
<p>The rope hero game is available for free on different devices. You can download the game from the official app stores or from other sources. Here is how to download the rope hero game on different devices:</p>
|
22 |
-
<h3>Android Devices</h3>
|
23 |
-
<p>If you have an Android device, you can download the game from Google Play Store or from other sources. Here are the steps to download the game from Google Play Store:</p>
|
24 |
-
<ol>
|
25 |
-
<li>Open Google Play Store on your device.</li>
|
26 |
-
<li>Search for "rope hero" in the search bar.</li>
|
27 |
-
<li>Select the game with the blue icon and the name "Rope Hero: Vice Town".</li>
|
28 |
-
<li>Tap on "Install" and wait for the game to download and install on your device.</li>
|
29 |
-
<li>Tap on "Open" and enjoy playing the game.</li>
|
30 |
-
</ol>
|
31 |
-
<p>If you want to download the game from other sources, you need to enable unknown sources on your device. Here are the steps to enable unknown sources on your device:</p>
|
32 |
-
<p>rope hero vice town game download kaise kare<br />
|
33 |
-
rope hero mafia city wars game download for android<br />
|
34 |
-
rope hero game download karne ka tarika<br />
|
35 |
-
rope hero game download karna hai<br />
|
36 |
-
rope hero game download karo<br />
|
37 |
-
rope hero game download karne wala<br />
|
38 |
-
rope hero game download karne ki website<br />
|
39 |
-
rope hero game download karne ke liye<br />
|
40 |
-
rope hero game download kare<br />
|
41 |
-
rope hero game download karne ka app<br />
|
42 |
-
rope hero vice town game download for pc<br />
|
43 |
-
rope hero mafia city wars game download for ios<br />
|
44 |
-
rope hero vice town game download apk<br />
|
45 |
-
rope hero mafia city wars game download apk<br />
|
46 |
-
rope hero vice town game download play store<br />
|
47 |
-
rope hero mafia city wars game download play store<br />
|
48 |
-
rope hero vice town game download app store<br />
|
49 |
-
rope hero mafia city wars game download app store<br />
|
50 |
-
rope hero vice town game download free<br />
|
51 |
-
rope hero mafia city wars game download free<br />
|
52 |
-
rope hero vice town game download offline<br />
|
53 |
-
rope hero mafia city wars game download offline<br />
|
54 |
-
rope hero vice town game download online<br />
|
55 |
-
rope hero mafia city wars game download online<br />
|
56 |
-
rope hero vice town game download latest version<br />
|
57 |
-
rope hero mafia city wars game download latest version<br />
|
58 |
-
rope hero vice town game download mod apk<br />
|
59 |
-
rope hero mafia city wars game download mod apk<br />
|
60 |
-
rope hero vice town game download hack version<br />
|
61 |
-
rope hero mafia city wars game download hack version<br />
|
62 |
-
rope hero vice town game download full version<br />
|
63 |
-
rope hero mafia city wars game download full version<br />
|
64 |
-
rope hero vice town game download new version<br />
|
65 |
-
rope hero mafia city wars game download new version<br />
|
66 |
-
rope hero vice town game download old version<br />
|
67 |
-
rope hero mafia city wars game download old version<br />
|
68 |
-
rope hero vice town game download update version<br />
|
69 |
-
rope hero mafia city wars game download update version<br />
|
70 |
-
rope hero vice town game features and reviews<br />
|
71 |
-
rope hero mafia city wars game features and reviews<br />
|
72 |
-
best tips and tricks for playing rope hero vice town game <br />
|
73 |
-
best tips and tricks for playing rope hero mafia city wars game <br />
|
74 |
-
how to play and win in rope hero vice town game <br />
|
75 |
-
how to play and win in rope hero mafia city wars game <br />
|
76 |
-
how to install and uninstall rope hero vice town game <br />
|
77 |
-
how to install and uninstall rope hero mafia city wars game <br />
|
78 |
-
how to solve common problems in rope hero vice town game <br />
|
79 |
-
how to solve common problems in rope hero mafia city wars game</p>
|
80 |
-
<ol>
|
81 |
-
<li>Go to Settings on your device.</li>
|
82 |
-
<li>Tap on Security or Privacy.</li>
|
83 |
-
<li>Find and enable Unknown Sources or Install Unknown Apps.</li>
|
84 |
-
<li>Confirm your choice by tapping OK or Allow.</li>
|
85 |
-
</ol>
|
86 |
-
<p>After enabling unknown sources, you can download the game from any website that offers APK files. Here are some of the websites that offer APK files for the rope hero game:</p>
|
87 |
-
<ul>
|
88 |
-
<li>[APKPure]</li>
|
89 |
-
<li>[APKMonk]</li>
|
90 |
-
<li>[APKMirror]</li>
|
91 |
-
</ul>
|
92 |
-
<p>Here are the steps to download the game from these websites:</p>
|
93 |
-
<ol>
|
94 |
-
<li>Open any of these websites on your browser.</li>
|
95 |
-
<li>Search for "rope hero" in the search bar.</li>
|
96 |
-
<li>Select the game with the blue icon and the name "Rope Hero: Vice Town".</li>
|
97 |
-
<li>Tap on "Download APK" and wait for the file to download on your device.</li>
|
98 |
-
<li>Open the file and tap on "Install" and wait for the game to install on your device.</li>
|
99 |
-
<li>Tap on "Open" and enjoy playing the game.</li>
|
100 |
-
</ol>
|
101 |
-
<h3>iOS Devices</h3>
|
102 |
-
<p>If you have an iOS device, you can download the game from App Store or from other sources. Here are the steps to download the game from App Store:</p>
|
103 |
-
<ol>
|
104 |
-
<li>Open App Store on your device.</li>
|
105 |
-
<li>Search for "rope hero" in the search bar.</li>
|
106 |
-
<li>Select the game with the blue icon and the name "Rope Hero: Vice Town".</li>
|
107 |
-
<li>Tap on "Get" and wait for the game to download and install on your device.</li>
|
108 |
-
<li>Tap on "Open" and enjoy playing the game.</li>
|
109 |
-
</ol>
|
110 |
-
<p>If you want to download the game from other sources, you need to use a third-party app installer such as TutuApp or AppValley. Here are some of the app installers that offer IPA files for the rope hero game:</p>
|
111 |
-
<ul>
|
112 |
-
<li>[TutuApp]</li>
|
113 |
-
<li>[AppValley]</li>
|
114 |
-
</ul>
|
115 |
-
<p>Here are the steps to download the game from these app installers:</p>
|
116 |
-
<ol>
|
117 |
-
<li>Open any of these app installers on your browser.</li>
|
118 |
-
<li>Search for "rope hero" in the search bar.</li>
|
119 |
-
<li>Select the game with the blue icon and the name "Rope Hero: Vice Town".</ <li>Tap on "Install" and wait for the game to download and install on your device.</li>
|
120 |
-
<li>Tap on "Open" and enjoy playing the game.</li>
|
121 |
-
</ol>
|
122 |
-
<p>Note: You may need to trust the app installer on your device before you can use it. To do that, go to Settings > General > Profiles & Device Management and tap on the app installer's profile. Then, tap on "Trust" and confirm your choice.</p>
|
123 |
-
<h3>PC or Mac Devices</h3>
|
124 |
-
<p>If you have a PC or Mac device, you can download the game from BlueStacks or other emulators. BlueStacks is a software that allows you to run Android apps on your PC or Mac. Here are the steps to download the game from BlueStacks:</p>
|
125 |
-
<ol>
|
126 |
-
<li>Download and install BlueStacks on your PC or Mac from [here](https://www.bluestacks.com/).</li>
|
127 |
-
<li>Open BlueStacks and sign in with your Google account.</li>
|
128 |
-
<li>Search for "rope hero" in the search bar.</li>
|
129 |
-
<li>Select the game with the blue icon and the name "Rope Hero: Vice Town".</li>
|
130 |
-
<li>Click on "Install" and wait for the game to download and install on your device.</li>
|
131 |
-
<li>Click on "Open" and enjoy playing the game.</li>
|
132 |
-
</ol>
|
133 |
-
<p>If you want to use other emulators, you can check out this [list](https://www.androidauthority.com/best-android-emulators-for-pc-655308/) of the best Android emulators for PC and Mac.</p>
|
134 |
-
<h2>How to Play the Rope Hero Game?</h2>
|
135 |
-
<p>Now that you have downloaded the rope hero game on your device, you are ready to play it. The game is easy to play, but it also has some challenges and surprises that make it more interesting. Here is how to play the rope hero game:</p>
|
136 |
-
<p>The game has a basic tutorial that teaches you how to use your super powers, fight enemies, and complete quests. You can access the tutorial by tapping on the question mark icon on the top left corner of the screen. You can also skip the tutorial if you want to learn by yourself.</p>
|
137 |
-
<p>The game has a simple control scheme that allows you to move, jump, swing, shoot, and interact with objects. You can use the virtual joystick on the left side of the screen to move your character. You can use the buttons on the right side of the screen to perform different actions. Here are some of the buttons and their functions:</p>
|
138 |
-
<ul>
|
139 |
-
<li>The jump button lets you jump over obstacles and perform mega jumps.</li>
|
140 |
-
<li>The rope button lets you shoot your super rope and swing from one place to another.</li>
|
141 |
-
<li>The fire button lets you shoot your weapon or use your gravity gun.</li>
|
142 |
-
<li>The aim button lets you aim your weapon or gravity gun more precisely.</li>
|
143 |
-
<li>The reload button lets you reload your weapon or gravity gun.</li>
|
144 |
-
<li>The action button lets you interact with objects, vehicles, people, and animals.</li>
|
145 |
-
</ul>
|
146 |
-
<p>The game has a map that shows you where you are, where your enemies are, and where your quests are. You can access the map by tapping on the map icon on the top right corner of the screen. You can also zoom in and out of the map by pinching the screen.</p>
|
147 |
-
<p>The game has a menu that lets you customize your character, check your inventory, view your skills, change your settings, and more. You can access the menu by tapping on the menu icon on the top left corner of the screen. Here are some of the options in the menu:</p>
|
148 |
-
<ul>
|
149 |
-
<li>The character option lets you choose your weapons, accessories, and vehicles.</li>
|
150 |
-
<li>The inventory option lets you see what items you have collected or bought.</li>
|
151 |
-
<li>The skills option lets you see what skills you have learned or upgraded.</li>
|
152 |
-
<li>The settings option lets you adjust the sound, graphics, controls, and language of the game.</li>
|
153 |
-
<li>The help option lets you see some tips and tricks for playing the game better.</li>
|
154 |
-
</ul>
|
155 |
-
<p>The game has a lot of quests and challenges that you can complete to earn money, experience, and rewards. You can find quests by looking for yellow exclamation marks on the map or by talking to people with yellow speech bubbles above their heads. You can find challenges by looking for red skulls on the map or by participating in mini-games such as gangster shootouts, car races, arena battles, ATM hacking, and more. You can also create your own quests and challenges by using your imagination and creativity.</p> <h2>How to Use Your Super Powers?</h2>
|
156 |
-
<p>One of the best things about the rope hero game is that you can use your super powers to do amazing things. Here are some of the super powers that you have and how to use them:</p>
|
157 |
-
<ul>
|
158 |
-
<li>Your super rope lets you swing from one building to another like a spider. You can use it to travel faster, escape from enemies, or reach high places. To use your super rope, tap on the rope button and aim at a building or object. Then, tap on the rope button again to release the rope and swing.</li>
|
159 |
-
<li>Your mega jumps let you leap over obstacles and perform power landings. You can use them to jump over cars, walls, or enemies, or to land with a shockwave that damages nearby enemies. To use your mega jumps, tap on the jump button and hold it until the power meter is full. Then, release the jump button and watch your character soar.</li>
|
160 |
-
<li>Your gravity gun lets you manipulate objects and enemies with a powerful force. You can use it to grab, throw, or crush objects or enemies, or to create explosions or implosions. To use your gravity gun, tap on the fire button and aim at an object or enemy. Then, tap on the fire button again to release the object or enemy.</li>
|
161 |
-
</ul>
|
162 |
-
<p>These are just some of the super powers that you have in the game. You can also unlock more super powers as you level up and upgrade your skills. You can also combine your super powers with your weapons and vehicles to create more awesome effects.</p>
|
163 |
-
<h2>How to Complete Quests and Challenges?</h2>
|
164 |
-
<p>The rope hero game has a lot of quests and challenges that you can complete to earn money, experience, and rewards. Quests are missions that have a story and a goal. Challenges are activities that have a time limit or a score limit. Here are some of the quests and challenges that you can find in the game:</p>
|
165 |
-
<ul>
|
166 |
-
<li>Gangster Shootouts: These are quests where you have to fight against gangsters who are attacking you or other people. You can use your super powers, weapons, or vehicles to defeat them. You can also use the environment to your advantage by swinging, jumping, or throwing objects at them.</li>
|
167 |
-
<li>Car Races: These are challenges where you have to race against other drivers on the streets. You can use your super powers, weapons, or vehicles to win the race. You can also use shortcuts, ramps, or obstacles to gain an edge over your opponents.</li>
|
168 |
-
<li>Arena Battles: These are challenges where you have to fight against other super heroes in an arena. You can use your super powers, weapons, or vehicles to beat them. You can also use traps, power-ups, or spectators to help you.</li>
|
169 |
-
<li>ATM Hacking: These are quests where you have to hack into ATMs and steal money from them. You can use your super powers, weapons, or vehicles to hack into them. You can also use stealth, distraction, or disguise to avoid detection.</li>
|
170 |
-
<li>And more: There are many more quests and challenges that you can find in the game. You can also create your own quests and challenges by using your imagination and creativity.</li>
|
171 |
-
</ul>
|
172 |
-
<p>To complete quests and challenges, you have to follow the instructions and objectives that are given to you. You can also check your progress and status by tapping on the quest icon or challenge icon on the top right corner of the screen.</p>
|
173 |
-
<h2>How to Customize Your Character and Vehicles?</h2>
|
174 |
-
<p>The rope hero game also lets you customize your character and vehicles with different weapons, accessories, and colors. You can make your character look cool, funny, or scary with different outfits, masks, hats, glasses, and more. You can also make your vehicles look fast, furious, or fancy with different models, parts, stickers, and more.</p>
|
175 |
-
<p>To customize your character and vehicles, you have to go to the shop or garage in the game. You can find them by looking for green dollar signs on the map or by tapping on the menu icon and selecting the character option or the inventory option.</p>
|
176 |
-
<p>In the shop or garage, you can buy new weapons, accessories, and vehicles with money that you earn from completing quests and challenges. You can also sell old weapons, accessories, and vehicles that you don't need anymore.</p>
|
177 |
-
<p>In the shop or garage, you can also change the color of your weapons, accessories and vehicles with different colors that you like. You can also preview how your character and vehicles will look like before you buy or change them.</p>
|
178 |
-
<p>To customize your character and vehicles, you have to tap on the item that you want to buy or change and then tap on the buy button or the color button. You can also tap on the preview button to see how your character and vehicles will look like.</p>
|
179 |
-
<p>Customizing your character and vehicles is not only fun, but also useful. Different weapons, accessories, and vehicles have different stats and effects that can help you in your quests and challenges. For example, some weapons have more damage, some accessories have more defense, and some vehicles have more speed. You can check the stats and effects of each item by tapping on the info button.</p>
|
180 |
-
<h2>Tips and Tricks for Playing the Rope Hero Game</h2>
|
181 |
-
<p>The rope hero game is easy to play, but it also has some tips and tricks that can help you play better and have more fun. Here are some of the tips and tricks that we recommend:</p>
|
182 |
-
<ul>
|
183 |
-
<li>Use your super powers wisely. Your super powers are awesome, but they also have a cooldown time that prevents you from using them too often. You can see the cooldown time by looking at the blue bar below each button. You can also reduce the cooldown time by upgrading your skills in the menu.</li>
|
184 |
-
<li>Save money and resources. Money and resources are important in the game, as they allow you to buy new weapons, accessories, and vehicles, as well as upgrade your skills. You can save money and resources by completing quests and challenges, finding hidden collectibles, avoiding unnecessary expenses, and selling old items.</li>
|
185 |
-
<li>Level up faster. Leveling up is important in the game, as it allows you to unlock new super powers, skills, weapons, accessories, and vehicles. You can level up faster by gaining more experience from completing quests and challenges, fighting enemies, using your super powers, and participating in mini-games.</li>
|
186 |
-
<li>Have more fun. The game is meant to be fun and entertaining, so don't be afraid to use your creativity, imagination, and humor to enjoy the game more. You can do things like swinging from a helicopter, jumping from a skyscraper, throwing a car at a gangster, or wearing a funny outfit.</li>
|
187 |
-
</ul>
|
188 |
-
<h2>Conclusion</h2>
|
189 |
-
<p>The rope hero game is one of the best superhero games that you can play on your device. It lets you become a super rope hero in a vice city full of crime and chaos. You can use your super powers to help the police and free the city from gangsters, or you can become a villain and cause mayhem and destruction. The choice is yours.</p>
|
190 |
-
<p>The game has amazing graphics, realistic physics, dynamic gameplay, humorous dialogue, high replay value, and a lot of fun and excitement. You can also customize your character and vehicles with different weapons, accessories, and colors. You can also complete quests and challenges to earn money, experience, and rewards. You can also create your own quests and challenges by using your imagination and creativity.</p>
|
191 |
-
<p>If you are looking for a game that lets you swing like a spider, fight like a ninja, and drive like a racer, you should download the rope hero game now. It is free to download on different devices. You will not regret it.</p>
|
192 |
-
<h2>FAQs</h2>
|
193 |
-
<p>Here are some of the frequently asked questions about the rope hero game:</p>
|
194 |
-
<ol>
|
195 |
-
<li>Q: Is the rope hero game safe to download?</li>
|
196 |
-
<li>A: Yes, the rope hero game is safe to download from the official app stores or from other sources that offer APK or IPA files. However, you should always be careful when downloading apps from unknown sources and check for viruses or malware before installing them.</li>
|
197 |
-
<li>Q: Is the rope hero game online or offline?</li>
|
198 |
-
<li>A: The rope hero game is mostly offline, meaning that you can play it without an internet connection. However, some features of the game may require an internet connection such as watching ads to get free rewards or accessing online leaderboards.</li>
|
199 |
-
<li>Q: How do I update the rope hero game?</li>
|
200 |
-
<li>A: The rope hero game updates automatically when there is a new version available. However, you can also check for updates manually by going to the app store or website where you downloaded the game and tapping on the update button.</li>
|
201 |
-
<li>Q: How do I contact the developers of the rope hero game?</li>
|
202 |
-
<li>A: You can contact the developers of the rope hero game by sending them an email at [email protected] or by visiting their website at https://naxeex.com/.</li>
|
203 |
-
<li>Q: How do I share my feedback or suggestions for the rope hero game?</li>
|
204 |
-
<li>A: You can share your feedback or suggestions for the rope hero game by leaving a review or rating on the app store or website where you downloaded the game. You can also join the rope hero game community on social media platforms such as Facebook, Twitter, Instagram, or YouTube and share your feedback or suggestions there.</li>
|
205 |
-
</ol></p> 197e85843d<br />
|
206 |
-
<br />
|
207 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/FNaF x Brawl Stars Download the APK and Join the Fun.md
DELETED
@@ -1,125 +0,0 @@
-<br />
-<h1>What is FNAF Brawl Stars APK?</h1>
-<p>FNAF Brawl Stars APK is a fan-made game that combines the characters and gameplay of two popular games: Five Nights at Freddy's (FNAF) and Brawl Stars. FNAF is a horror game series that features animatronic robots that come to life at night and try to kill you. Brawl Stars is a multiplayer game that features various modes of 3v3 battles with different characters that have unique abilities.</p>
-<p>FNAF Brawl Stars APK is a game that lets you play as your favorite FNAF characters in a Brawl Stars style. You can choose from Freddy, Bonnie, Chica, Foxy, Golden Freddy, and more. You can also customize your character with skins and gadgets. You can play solo or with your friends in different game modes such as Gem Grab, Showdown, Bounty, Heist, and more. You can also compete in the Championship Challenge and climb the leaderboards.</p>
-<h2>fnaf brawl stars apk</h2><br /><p><b><b>Download Zip</b> ===== <a href="https://jinyurl.com/2uNUsA">https://jinyurl.com/2uNUsA</a></b></p><br /><br />
-<h2>FNAF Brawl Stars APK Features</h2>
-<p>FNAF Brawl Stars APK has many features that make it an exciting and fun game to play. Here are some of them:</p>
-<ul>
-<li>It has high-quality graphics and sound effects that create a spooky and immersive atmosphere.</li>
-<li>It has a variety of game modes that offer different challenges and strategies.</li>
-<li>It has a large roster of characters that have different skills and personalities.</li>
-<li>It has a customization system that allows you to change your character's appearance and performance.</li>
-<li>It has a social aspect that lets you chat with other players and join clubs.</li>
-<li>It has frequent updates that add new content and features.</li>
-</ul>
-<h3>How to Download and Install FNAF Brawl Stars APK</h3>
-<p>If you want to try FNAF Brawl Stars APK, you need to download it from a third-party source since it is not available on the official app stores. Here are the steps to download and install FNAF Brawl Stars APK:</p>
-<ol>
-<li>Go to [Game Jolt](^1^) and search for FNAF Brawl Stars.</li>
-<li>Select the game and click on Download. You will get a file named Fnaf_stars.apk.</li>
-<li>Before installing the file, you need to enable Unknown Sources on your device. Go to Settings > Security > Unknown Sources and toggle it on.</li>
-<li>Locate the Fnaf_stars.apk file on your device and tap on it to install it.</li>
-<li>Wait for the installation to finish and then launch the game.</li>
-</ol>
-<h3>How to Play FNAF Brawl Stars APK</h3>
-<p>FNAF Brawl Stars APK is easy to play once you get familiar with the controls and mechanics. Here are some tips on how to play FNAF Brawl Stars APK:</p>
-<ul>
-<li>To move your character, use the joystick on the left side of the screen.</li>
-<li>To attack your enemies, use the button on the right side of the screen. You can also drag it to aim your shots.</li>
-<li>To use your character's Super ability, use the button on the bottom right corner of the screen. You need to charge it up by hitting enemies or collecting gems.</li>
-<li>To use your character's Gadget, use the button on the top right corner of the screen. You have a limited number of uses per match.</li>
-<li>To switch between different game modes, use the menu on the bottom left corner of the screen.</li>
-<li>To access your profile, shop, club, news, settings, and more, use the menu on the top left corner of the screen.</li>
-</ul>
-<h2>FNAF Brawl Stars APK Review</h2 <h3>Pros and Cons of FNAF Brawl Stars APK</h3>
-<p>FNAF Brawl Stars APK is a game that has both advantages and disadvantages. Here are some of them:</p>
-<table>
-<tr>
-<th>Pros</th>
-<th>Cons</th>
-</tr>
-<tr>
-<td>It is free to play and download.</td>
-<td>It is not an official game and may have bugs and glitches.</td>
-</tr>
-<tr>
-<td>It is fun and addictive to play with different characters and modes.</td>
-<td>It may be too scary or violent for some players.</td>
-</tr>
-<tr>
-<td>It has a large fan community that supports the game.</td>
-<td>It may not be compatible with some devices or regions.</td>
-</tr>
-</table>
-<h3>User Ratings and Feedback</h3>
-<p>FNAF Brawl Stars APK has received mostly positive ratings and feedback from the users who have played it. Here are some of the comments from the Game Jolt page:</p>
-<p>fnaf brawl stars mod apk download<br />
-fnaf x brawl stars fan game<br />
-fnaf stars game jolt<br />
-fnaf brawl stars apk free<br />
-fnaf brawl stars android<br />
-fnaf brawl stars online<br />
-fnaf brawl stars characters<br />
-fnaf brawl stars gameplay<br />
-fnaf brawl stars update<br />
-fnaf brawl stars skins<br />
-fnaf brawl stars hack<br />
-fnaf brawl stars cheats<br />
-fnaf brawl stars tips<br />
-fnaf brawl stars guide<br />
-fnaf brawl stars review<br />
-fnaf brawl stars trailer<br />
-fnaf brawl stars wiki<br />
-fnaf brawl stars reddit<br />
-fnaf brawl stars discord<br />
-fnaf brawl stars youtube<br />
-fnaf brawl stars fan art<br />
-fnaf brawl stars memes<br />
-fnaf brawl stars theories<br />
-fnaf brawl stars lore<br />
-fnaf brawl stars easter eggs<br />
-fnaf brawl stars secrets<br />
-fnaf brawl stars glitches<br />
-fnaf brawl stars bugs<br />
-fnaf brawl stars maps<br />
-fnaf brawl stars modes<br />
-fnaf brawl stars events<br />
-fnaf brawl stars challenges<br />
-fnaf brawl stars quests<br />
-fnaf brawl stars trophies<br />
-fnaf brawl stars ranks<br />
-fnaf brawl stars rewards<br />
-fnaf brawl stars gems<br />
-fnaf brawl stars coins<br />
-fnaf brawl stars boxes<br />
-fnaf brawl stars brawlers<br />
-fnaf brawl stars star powers<br />
-fnaf brawl stars gadgets<br />
-fnaf brawl stars pins<br />
-fnaf brawl stars stickers<br />
-fnaf brawl stars emotes<br />
-fnaf brawl stars voice lines<br />
-fnaf brawl stars soundtracks<br />
-fnaf brawl stars wallpapers</p>
-<blockquote>"This game is awesome! I love how you can play as FNAF characters in Brawl Stars. It's like a dream come true!" - FoxyFan123</blockquote>
-<blockquote>"This game is very well made and fun to play. The graphics and sound effects are amazing. The gameplay is smooth and challenging. I recommend this game to anyone who likes FNAF and Brawl Stars." - GamerGirl456</blockquote>
-<blockquote>"This game is good but it needs some improvements. The loading time is too long and sometimes the game crashes. The characters are also unbalanced and some of them are too OP. Please fix these issues." - BrawlerBoy789</blockquote>
-<h2>Conclusion</h2>
-<p>FNAF Brawl Stars APK is a fan-made game that combines the elements of FNAF and Brawl Stars. It is a game that offers a lot of fun and excitement for the fans of both games. It has many features, such as high-quality graphics, various game modes, customization options, social interaction, and more. It also has some drawbacks, such as being unofficial, having bugs and glitches, being too scary or violent, and being incompatible with some devices or regions. Overall, FNAF Brawl Stars APK is a game that is worth trying if you are looking for a new and unique gaming experience.</p>
-<h3>FAQs</h3>
-<p>Here are some frequently asked questions about FNAF Brawl Stars APK:</p>
-<ul>
-<li><b>Q: Is FNAF Brawl Stars APK safe to download and play?</b></li>
-<li>A: FNAF Brawl Stars APK is safe to download and play as long as you get it from a trusted source, such as Game Jolt. However, you should always be careful when downloading files from unknown sources and scan them for viruses or malware before installing them.</li>
-<li><b>Q: How can I update FNAF Brawl Stars APK?</b></li>
-<li>A: FNAF Brawl Stars APK is updated regularly by the developer, who posts the latest version on Game Jolt. You can check the page for any updates and download them manually. You can also follow the developer on Game Jolt or Twitter to get notified of any updates.</li>
-<li><b>Q: How can I contact the developer of FNAF Brawl Stars APK?</b></li>
-<li>A: You can contact the developer of FNAF Brawl Stars APK by leaving a comment on the Game Jolt page or by sending a message on Twitter (@FnafStars). The developer is very responsive and friendly and will answer any questions or feedback you have about the game.</li>
-<li><b>Q: How can I support the development of FNAF Brawl Stars APK?</b></li>
-<li>A: You can support the development of FNAF Brawl Stars APK by playing the game, sharing it with your friends, leaving a rating and review on Game Jolt, donating to the developer via PayPal or Patreon, or joining the Discord server for the game.</li>
-<li><b>Q: What are some similar games to FNAF Brawl Stars APK?</b></li>
-<li>A: Some similar games to FNAF Brawl Stars APK are FNAF World, which is an official RPG spin-off of FNAF, Super FNaF 2: Wonderful Day, which is a fan-made platformer game based on FNAF, and Freddy in Space 2, which is an official shooter game based on FNAF.</li>
-</ul></p> 401be4b1e0<br />
-<br />
-<br />
spaces/232labs/VToonify/vtoonify/model/bisenet/resnet.py
DELETED
@@ -1,109 +0,0 @@
-#!/usr/bin/python
-# -*- encoding: utf-8 -*-
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.model_zoo as modelzoo
-
-# from modules.bn import InPlaceABNSync as BatchNorm2d
-
-resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
-
-
-def conv3x3(in_planes, out_planes, stride=1):
-    """3x3 convolution with padding"""
-    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
-                     padding=1, bias=False)
-
-
-class BasicBlock(nn.Module):
-    def __init__(self, in_chan, out_chan, stride=1):
-        super(BasicBlock, self).__init__()
-        self.conv1 = conv3x3(in_chan, out_chan, stride)
-        self.bn1 = nn.BatchNorm2d(out_chan)
-        self.conv2 = conv3x3(out_chan, out_chan)
-        self.bn2 = nn.BatchNorm2d(out_chan)
-        self.relu = nn.ReLU(inplace=True)
-        self.downsample = None
-        if in_chan != out_chan or stride != 1:
-            self.downsample = nn.Sequential(
-                nn.Conv2d(in_chan, out_chan,
-                          kernel_size=1, stride=stride, bias=False),
-                nn.BatchNorm2d(out_chan),
-                )
-
-    def forward(self, x):
-        residual = self.conv1(x)
-        residual = F.relu(self.bn1(residual))
-        residual = self.conv2(residual)
-        residual = self.bn2(residual)
-
-        shortcut = x
-        if self.downsample is not None:
-            shortcut = self.downsample(x)
-
-        out = shortcut + residual
-        out = self.relu(out)
-        return out
-
-
-def create_layer_basic(in_chan, out_chan, bnum, stride=1):
-    layers = [BasicBlock(in_chan, out_chan, stride=stride)]
-    for i in range(bnum-1):
-        layers.append(BasicBlock(out_chan, out_chan, stride=1))
-    return nn.Sequential(*layers)
-
-
-class Resnet18(nn.Module):
-    def __init__(self):
-        super(Resnet18, self).__init__()
-        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
-                               bias=False)
-        self.bn1 = nn.BatchNorm2d(64)
-        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
-        self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
-        self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
-        self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
-        self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
-        self.init_weight()
-
-    def forward(self, x):
-        x = self.conv1(x)
-        x = F.relu(self.bn1(x))
-        x = self.maxpool(x)
-
-        x = self.layer1(x)
-        feat8 = self.layer2(x) # 1/8
-        feat16 = self.layer3(feat8) # 1/16
-        feat32 = self.layer4(feat16) # 1/32
-        return feat8, feat16, feat32
-
-    def init_weight(self):
-        state_dict = modelzoo.load_url(resnet18_url)
-        self_state_dict = self.state_dict()
-        for k, v in state_dict.items():
-            if 'fc' in k: continue
-            self_state_dict.update({k: v})
-        self.load_state_dict(self_state_dict)
-
-    def get_params(self):
-        wd_params, nowd_params = [], []
-        for name, module in self.named_modules():
-            if isinstance(module, (nn.Linear, nn.Conv2d)):
-                wd_params.append(module.weight)
-                if not module.bias is None:
-                    nowd_params.append(module.bias)
-            elif isinstance(module, nn.BatchNorm2d):
-                nowd_params += list(module.parameters())
-        return wd_params, nowd_params
-
-
-if __name__ == "__main__":
-    net = Resnet18()
-    x = torch.randn(16, 3, 224, 224)
-    out = net(x)
-    print(out[0].size())
-    print(out[1].size())
-    print(out[2].size())
-    net.get_params()
spaces/44ov41za8i/FreeVC/speaker_encoder/preprocess.py
DELETED
@@ -1,285 +0,0 @@
-from multiprocess.pool import ThreadPool
-from speaker_encoder.params_data import *
-from speaker_encoder.config import librispeech_datasets, anglophone_nationalites
-from datetime import datetime
-from speaker_encoder import audio
-from pathlib import Path
-from tqdm import tqdm
-import numpy as np
-
-
-class DatasetLog:
-    """
-    Registers metadata about the dataset in a text file.
-    """
-    def __init__(self, root, name):
-        self.text_file = open(Path(root, "Log_%s.txt" % name.replace("/", "_")), "w")
-        self.sample_data = dict()
-
-        start_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M"))
-        self.write_line("Creating dataset %s on %s" % (name, start_time))
-        self.write_line("-----")
-        self._log_params()
-
-    def _log_params(self):
-        from speaker_encoder import params_data
-        self.write_line("Parameter values:")
-        for param_name in (p for p in dir(params_data) if not p.startswith("__")):
-            value = getattr(params_data, param_name)
-            self.write_line("\t%s: %s" % (param_name, value))
-        self.write_line("-----")
-
-    def write_line(self, line):
-        self.text_file.write("%s\n" % line)
-
-    def add_sample(self, **kwargs):
-        for param_name, value in kwargs.items():
-            if not param_name in self.sample_data:
-                self.sample_data[param_name] = []
-            self.sample_data[param_name].append(value)
-
-    def finalize(self):
-        self.write_line("Statistics:")
-        for param_name, values in self.sample_data.items():
-            self.write_line("\t%s:" % param_name)
-            self.write_line("\t\tmin %.3f, max %.3f" % (np.min(values), np.max(values)))
-            self.write_line("\t\tmean %.3f, median %.3f" % (np.mean(values), np.median(values)))
-        self.write_line("-----")
-        end_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M"))
-        self.write_line("Finished on %s" % end_time)
-        self.text_file.close()
-
-
-def _init_preprocess_dataset(dataset_name, datasets_root, out_dir) -> (Path, DatasetLog):
-    dataset_root = datasets_root.joinpath(dataset_name)
-    if not dataset_root.exists():
-        print("Couldn\'t find %s, skipping this dataset." % dataset_root)
-        return None, None
-    return dataset_root, DatasetLog(out_dir, dataset_name)
-
-
-def _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, extension,
-                             skip_existing, logger):
-    print("%s: Preprocessing data for %d speakers." % (dataset_name, len(speaker_dirs)))
-
-    # Function to preprocess utterances for one speaker
-    def preprocess_speaker(speaker_dir: Path):
-        # Give a name to the speaker that includes its dataset
-        speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts)
-
-        # Create an output directory with that name, as well as a txt file containing a
-        # reference to each source file.
-        speaker_out_dir = out_dir.joinpath(speaker_name)
-        speaker_out_dir.mkdir(exist_ok=True)
-        sources_fpath = speaker_out_dir.joinpath("_sources.txt")
-
-        # There's a possibility that the preprocessing was interrupted earlier, check if
-        # there already is a sources file.
-        if sources_fpath.exists():
-            try:
-                with sources_fpath.open("r") as sources_file:
-                    existing_fnames = {line.split(",")[0] for line in sources_file}
-            except:
-                existing_fnames = {}
-        else:
-            existing_fnames = {}
-
-        # Gather all audio files for that speaker recursively
-        sources_file = sources_fpath.open("a" if skip_existing else "w")
-        for in_fpath in speaker_dir.glob("**/*.%s" % extension):
-            # Check if the target output file already exists
-            out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts)
-            out_fname = out_fname.replace(".%s" % extension, ".npy")
-            if skip_existing and out_fname in existing_fnames:
-                continue
-
-            # Load and preprocess the waveform
-            wav = audio.preprocess_wav(in_fpath)
-            if len(wav) == 0:
-                continue
-
-            # Create the mel spectrogram, discard those that are too short
-            frames = audio.wav_to_mel_spectrogram(wav)
-            if len(frames) < partials_n_frames:
-                continue
-
-            out_fpath = speaker_out_dir.joinpath(out_fname)
-            np.save(out_fpath, frames)
-            logger.add_sample(duration=len(wav) / sampling_rate)
-            sources_file.write("%s,%s\n" % (out_fname, in_fpath))
-
-        sources_file.close()
-
-    # Process the utterances for each speaker
-    with ThreadPool(8) as pool:
-        list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs),
-                  unit="speakers"))
-    logger.finalize()
-    print("Done preprocessing %s.\n" % dataset_name)
-
-
-# Function to preprocess utterances for one speaker
-def __preprocess_speaker(speaker_dir: Path, datasets_root: Path, out_dir: Path, extension: str, skip_existing: bool):
-    # Give a name to the speaker that includes its dataset
-    speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts)
-
-    # Create an output directory with that name, as well as a txt file containing a
-    # reference to each source file.
-    speaker_out_dir = out_dir.joinpath(speaker_name)
-    speaker_out_dir.mkdir(exist_ok=True)
-    sources_fpath = speaker_out_dir.joinpath("_sources.txt")
-
-    # There's a possibility that the preprocessing was interrupted earlier, check if
-    # there already is a sources file.
-    # if sources_fpath.exists():
-    #     try:
-    #         with sources_fpath.open("r") as sources_file:
-    #             existing_fnames = {line.split(",")[0] for line in sources_file}
-    #     except:
-    #         existing_fnames = {}
-    # else:
-    #     existing_fnames = {}
-    existing_fnames = {}
-    # Gather all audio files for that speaker recursively
-    sources_file = sources_fpath.open("a" if skip_existing else "w")
-
-    for in_fpath in speaker_dir.glob("**/*.%s" % extension):
-        # Check if the target output file already exists
-        out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts)
-        out_fname = out_fname.replace(".%s" % extension, ".npy")
-        if skip_existing and out_fname in existing_fnames:
-            continue
-
-        # Load and preprocess the waveform
-        wav = audio.preprocess_wav(in_fpath)
-        if len(wav) == 0:
-            continue
-
-        # Create the mel spectrogram, discard those that are too short
-        frames = audio.wav_to_mel_spectrogram(wav)
-        if len(frames) < partials_n_frames:
-            continue
-
-        out_fpath = speaker_out_dir.joinpath(out_fname)
-        np.save(out_fpath, frames)
-        # logger.add_sample(duration=len(wav) / sampling_rate)
-        sources_file.write("%s,%s\n" % (out_fname, in_fpath))
-
-    sources_file.close()
-    return len(wav)
-
-def _preprocess_speaker_dirs_vox2(speaker_dirs, dataset_name, datasets_root, out_dir, extension,
-                                  skip_existing, logger):
-    # from multiprocessing import Pool, cpu_count
-    from pathos.multiprocessing import ProcessingPool as Pool
-    # Function to preprocess utterances for one speaker
-    def __preprocess_speaker(speaker_dir: Path):
-        # Give a name to the speaker that includes its dataset
-        speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts)
-
-        # Create an output directory with that name, as well as a txt file containing a
-        # reference to each source file.
-        speaker_out_dir = out_dir.joinpath(speaker_name)
-        speaker_out_dir.mkdir(exist_ok=True)
-        sources_fpath = speaker_out_dir.joinpath("_sources.txt")
-
-        existing_fnames = {}
-        # Gather all audio files for that speaker recursively
-        sources_file = sources_fpath.open("a" if skip_existing else "w")
-        wav_lens = []
-        for in_fpath in speaker_dir.glob("**/*.%s" % extension):
-            # Check if the target output file already exists
-            out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts)
-            out_fname = out_fname.replace(".%s" % extension, ".npy")
-            if skip_existing and out_fname in existing_fnames:
-                continue
-
-            # Load and preprocess the waveform
-            wav = audio.preprocess_wav(in_fpath)
-            if len(wav) == 0:
-                continue
-
-            # Create the mel spectrogram, discard those that are too short
-            frames = audio.wav_to_mel_spectrogram(wav)
-            if len(frames) < partials_n_frames:
-                continue
-
-            out_fpath = speaker_out_dir.joinpath(out_fname)
-            np.save(out_fpath, frames)
-            # logger.add_sample(duration=len(wav) / sampling_rate)
-            sources_file.write("%s,%s\n" % (out_fname, in_fpath))
-            wav_lens.append(len(wav))
-        sources_file.close()
-        return wav_lens
-
-    print("%s: Preprocessing data for %d speakers." % (dataset_name, len(speaker_dirs)))
-    # Process the utterances for each speaker
-    # with ThreadPool(8) as pool:
-    #     list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs),
-    #               unit="speakers"))
-    pool = Pool(processes=20)
-    for i, wav_lens in enumerate(pool.map(__preprocess_speaker, speaker_dirs), 1):
-        for wav_len in wav_lens:
-            logger.add_sample(duration=wav_len / sampling_rate)
-        print(f'{i}/{len(speaker_dirs)} \r')
-
-    logger.finalize()
-    print("Done preprocessing %s.\n" % dataset_name)
-
-
-def preprocess_librispeech(datasets_root: Path, out_dir: Path, skip_existing=False):
-    for dataset_name in librispeech_datasets["train"]["other"]:
-        # Initialize the preprocessing
-        dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir)
-        if not dataset_root:
-            return
-
-        # Preprocess all speakers
-        speaker_dirs = list(dataset_root.glob("*"))
-        _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "flac",
-                                 skip_existing, logger)
-
-
-def preprocess_voxceleb1(datasets_root: Path, out_dir: Path, skip_existing=False):
-    # Initialize the preprocessing
-    dataset_name = "VoxCeleb1"
-    dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir)
-    if not dataset_root:
-        return
-
-    # Get the contents of the meta file
-    with dataset_root.joinpath("vox1_meta.csv").open("r") as metafile:
-        metadata = [line.split("\t") for line in metafile][1:]
-
-    # Select the ID and the nationality, filter out non-anglophone speakers
-    nationalities = {line[0]: line[3] for line in metadata}
-    # keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items() if
-    #                     nationality.lower() in anglophone_nationalites]
-    keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items()]
-    print("VoxCeleb1: using samples from %d (presumed anglophone) speakers out of %d." %
-          (len(keep_speaker_ids), len(nationalities)))
-
-    # Get the speaker directories for anglophone speakers only
-    speaker_dirs = dataset_root.joinpath("wav").glob("*")
-    speaker_dirs = [speaker_dir for speaker_dir in speaker_dirs if
-                    speaker_dir.name in keep_speaker_ids]
-    print("VoxCeleb1: found %d anglophone speakers on the disk, %d missing (this is normal)." %
-          (len(speaker_dirs), len(keep_speaker_ids) - len(speaker_dirs)))
-
-    # Preprocess all speakers
-    _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "wav",
-                             skip_existing, logger)
-
-
-def preprocess_voxceleb2(datasets_root: Path, out_dir: Path, skip_existing=False):
-    # Initialize the preprocessing
-    dataset_name = "VoxCeleb2"
-    dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir)
-    if not dataset_root:
-        return
-
-    # Get the speaker directories
-    # Preprocess all speakers
-    speaker_dirs = list(dataset_root.joinpath("dev", "aac").glob("*"))
-    _preprocess_speaker_dirs_vox2(speaker_dirs, dataset_name, datasets_root, out_dir, "m4a",
-                                  skip_existing, logger)
spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py
DELETED
@@ -1,118 +0,0 @@
-import torch
-import torch.nn.functional as F
-from torch import nn
-
-from . import spec_utils
-
-
-class Conv2DBNActiv(nn.Module):
-    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-        super(Conv2DBNActiv, self).__init__()
-        self.conv = nn.Sequential(
-            nn.Conv2d(
-                nin,
-                nout,
-                kernel_size=ksize,
-                stride=stride,
-                padding=pad,
-                dilation=dilation,
-                bias=False,
-            ),
-            nn.BatchNorm2d(nout),
-            activ(),
-        )
-
-    def __call__(self, x):
-        return self.conv(x)
-
-
-class SeperableConv2DBNActiv(nn.Module):
-    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-        super(SeperableConv2DBNActiv, self).__init__()
-        self.conv = nn.Sequential(
-            nn.Conv2d(
-                nin,
-                nin,
-                kernel_size=ksize,
-                stride=stride,
-                padding=pad,
-                dilation=dilation,
-                groups=nin,
-                bias=False,
-            ),
-            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
-            nn.BatchNorm2d(nout),
-            activ(),
-        )
-
-    def __call__(self, x):
-        return self.conv(x)
-
-
-class Encoder(nn.Module):
-    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
-        super(Encoder, self).__init__()
-        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
-    def __call__(self, x):
-        skip = self.conv1(x)
-        h = self.conv2(skip)
-
-        return h, skip
-
-
-class Decoder(nn.Module):
-    def __init__(
-        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
-    ):
-        super(Decoder, self).__init__()
-        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-        self.dropout = nn.Dropout2d(0.1) if dropout else None
-
-    def __call__(self, x, skip=None):
-        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
-        if skip is not None:
-            skip = spec_utils.crop_center(skip, x)
-            x = torch.cat([x, skip], dim=1)
-        h = self.conv(x)
-
-        if self.dropout is not None:
-            h = self.dropout(h)
-
-        return h
-
-
-class ASPPModule(nn.Module):
-    def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
-        super(ASPPModule, self).__init__()
-        self.conv1 = nn.Sequential(
-            nn.AdaptiveAvgPool2d((1, None)),
-            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
-        )
-        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
-        self.conv3 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
-        )
-        self.conv4 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
-        )
-        self.conv5 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-        )
-        self.bottleneck = nn.Sequential(
-            Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
-        )
-
-    def forward(self, x):
-        _, _, h, w = x.size()
-        feat1 = F.interpolate(
-            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
-        )
-        feat2 = self.conv2(x)
-        feat3 = self.conv3(x)
-        feat4 = self.conv4(x)
-        feat5 = self.conv5(x)
-        out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
-        bottle = self.bottleneck(out)
-        return bottle
spaces/801artistry/RVC801/infer/modules/train/extract_feature_print.py
DELETED
@@ -1,137 +0,0 @@
-import os
-import sys
-import traceback
-
-os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
-os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
-
-device = sys.argv[1]
-n_part = int(sys.argv[2])
-i_part = int(sys.argv[3])
-if len(sys.argv) == 6:
-    exp_dir = sys.argv[4]
-    version = sys.argv[5]
-else:
-    i_gpu = sys.argv[4]
-    exp_dir = sys.argv[5]
-    os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu)
-    version = sys.argv[6]
-import fairseq
-import numpy as np
-import soundfile as sf
-import torch
-import torch.nn.functional as F
-
-if "privateuseone" not in device:
-    device = "cpu"
-    if torch.cuda.is_available():
-        device = "cuda"
-    elif torch.backends.mps.is_available():
-        device = "mps"
-else:
-    import torch_directml
-
-    device = torch_directml.device(torch_directml.default_device())
-
-    def forward_dml(ctx, x, scale):
-        ctx.scale = scale
-        res = x.clone().detach()
-        return res
-
-    fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml
-
-f = open("%s/extract_f0_feature.log" % exp_dir, "a+")
-
-
-def printt(strr):
-    print(strr)
-    f.write("%s\n" % strr)
-    f.flush()
-
-
-printt(sys.argv)
-model_path = "assets/hubert/hubert_base.pt"
-
-printt(exp_dir)
-wavPath = "%s/1_16k_wavs" % exp_dir
-outPath = (
-    "%s/3_feature256" % exp_dir if version == "v1" else "%s/3_feature768" % exp_dir
-)
-os.makedirs(outPath, exist_ok=True)
-
-
-# wave must be 16k, hop_size=320
-def readwave(wav_path, normalize=False):
-    wav, sr = sf.read(wav_path)
-    assert sr == 16000
-    feats = torch.from_numpy(wav).float()
-    if feats.dim() == 2: # double channels
-        feats = feats.mean(-1)
-    assert feats.dim() == 1, feats.dim()
-    if normalize:
-        with torch.no_grad():
-            feats = F.layer_norm(feats, feats.shape)
-    feats = feats.view(1, -1)
-    return feats
-
-
-# HuBERT model
-printt("load model(s) from {}".format(model_path))
-# if hubert model is exist
-if os.access(model_path, os.F_OK) == False:
-    printt(
-        "Error: Extracting is shut down because %s does not exist, you may download it from https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main"
-        % model_path
-    )
-    exit(0)
-models, saved_cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
-    [model_path],
-    suffix="",
-)
-model = models[0]
-model = model.to(device)
-printt("move model to %s" % device)
-if device not in ["mps", "cpu"]:
-    model = model.half()
-model.eval()
-
-todo = sorted(list(os.listdir(wavPath)))[i_part::n_part]
-n = max(1, len(todo) // 10) # 最多打印十条
-if len(todo) == 0:
-    printt("no-feature-todo")
-else:
-    printt("all-feature-%s" % len(todo))
-    for idx, file in enumerate(todo):
-        try:
-            if file.endswith(".wav"):
-                wav_path = "%s/%s" % (wavPath, file)
-                out_path = "%s/%s" % (outPath, file.replace("wav", "npy"))
-
-                if os.path.exists(out_path):
-                    continue
-
-                feats = readwave(wav_path, normalize=saved_cfg.task.normalize)
-                padding_mask = torch.BoolTensor(feats.shape).fill_(False)
-                inputs = {
-                    "source": feats.half().to(device)
-                    if device not in ["mps", "cpu"]
-                    else feats.to(device),
-                    "padding_mask": padding_mask.to(device),
-                    "output_layer": 9 if version == "v1" else 12, # layer 9
-                }
-                with torch.no_grad():
-                    logits = model.extract_features(**inputs)
-                    feats = (
-                        model.final_proj(logits[0]) if version == "v1" else logits[0]
-                    )
-
-                feats = feats.squeeze(0).float().cpu().numpy()
-                if np.isnan(feats).sum() == 0:
-                    np.save(out_path, feats, allow_pickle=False)
-                else:
-                    printt("%s-contains nan" % file)
-                if idx % n == 0:
-                    printt("now-%s,all-%s,%s,%s" % (len(todo), idx, file, feats.shape))
-        except:
-            printt(traceback.format_exc())
-    printt("all-feature-done")
spaces/A00001/bingothoo/src/lib/bots/bing/index.ts
DELETED
@@ -1,426 +0,0 @@
-import { fetch, WebSocket, debug } from '@/lib/isomorphic'
-import WebSocketAsPromised from 'websocket-as-promised'
-import {
-  SendMessageParams,
-  BingConversationStyle,
-  ConversationResponse,
-  ChatResponseMessage,
-  ConversationInfo,
-  InvocationEventType,
-  ChatError,
-  ErrorCode,
-  ChatUpdateCompleteResponse,
-  ImageInfo,
-  KBlobResponse
-} from './types'
-
-import { convertMessageToMarkdown, websocketUtils, streamAsyncIterable } from './utils'
-import { WatchDog, createChunkDecoder } from '@/lib/utils'
-
-type Params = SendMessageParams<{ bingConversationStyle: BingConversationStyle }>
-
-const OPTIONS_SETS = [
-  'nlu_direct_response_filter',
-  'deepleo',
-  'disable_emoji_spoken_text',
-  'responsible_ai_policy_235',
-  'enablemm',
-  'iycapbing',
-  'iyxapbing',
-  'objopinion',
-  'rweasgv2',
-  'dagslnv1',
-  'dv3sugg',
-  'autosave',
-  'iyoloxap',
-  'iyoloneutral',
-  'clgalileo',
-  'gencontentv3',
-]
-
-export class BingWebBot {
-  protected conversationContext?: ConversationInfo
-  protected cookie: string
-  protected ua: string
-  protected endpoint = ''
-  private lastText = ''
-  private asyncTasks: Array<Promise<any>> = []
-
-  constructor(opts: {
-    cookie: string
-    ua: string
-    bingConversationStyle?: BingConversationStyle
-    conversationContext?: ConversationInfo
-  }) {
-    const { cookie, ua, conversationContext } = opts
-    this.cookie = cookie?.includes(';') ? cookie : `_EDGE_V=1; _U=${cookie}`
-    this.ua = ua
-    this.conversationContext = conversationContext
-  }
-
-  static buildChatRequest(conversation: ConversationInfo) {
-    const optionsSets = OPTIONS_SETS
-    if (conversation.conversationStyle === BingConversationStyle.Precise) {
-      optionsSets.push('h3precise')
-    } else if (conversation.conversationStyle === BingConversationStyle.Creative) {
-      optionsSets.push('h3imaginative')
-    }
-    return {
-      arguments: [
-        {
-          source: 'cib',
-          optionsSets,
-          allowedMessageTypes: [
-            'ActionRequest',
-            'Chat',
-            'Context',
-            'InternalSearchQuery',
-            'InternalSearchResult',
-            'Disengaged',
-            'InternalLoaderMessage',
-            'Progress',
-            'RenderCardRequest',
-            'SemanticSerp',
-            'GenerateContentQuery',
-            'SearchQuery',
-          ],
-          sliceIds: [
-            'winmuid1tf',
-            'anssupfor_c',
-            'imgchatgptv2',
-            'tts2cf',
-            'contansperf',
-            'mlchatpc8500w',
-            'mlchatpc2',
-            'ctrlworkpay',
-            'winshortmsgtf',
-            'cibctrl',
-            'sydtransctrl',
-            'sydconfigoptc',
-            '0705trt4',
-            '517opinion',
-            '628ajcopus0',
-            '330uaugs0',
-            '529rwea',
-            '0626snptrcs0',
-            '424dagslnv1',
-          ],
-          isStartOfSession: conversation.invocationId === 0,
-          message: {
-            author: 'user',
-            inputMethod: 'Keyboard',
-            text: conversation.prompt,
-            imageUrl: conversation.imageUrl,
-            messageType: 'Chat',
-          },
-          conversationId: conversation.conversationId,
-          conversationSignature: conversation.conversationSignature,
-          participant: { id: conversation.clientId },
-        },
-      ],
-      invocationId: conversation.invocationId.toString(),
-      target: 'chat',
-      type: InvocationEventType.StreamInvocation,
-    }
-  }
-
-  async createConversation(): Promise<ConversationResponse> {
-    const headers = {
-      'Accept-Encoding': 'gzip, deflate, br, zsdch',
-      'User-Agent': this.ua,
-      'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
-      cookie: this.cookie,
-    }
-
-    let resp: ConversationResponse | undefined
-    try {
-      const response = await fetch(this.endpoint + '/api/create', { method: 'POST', headers, redirect: 'error', mode: 'cors', credentials: 'include' })
-      if (response.status === 404) {
-        throw new ChatError('Not Found', ErrorCode.NOTFOUND_ERROR)
-      }
-      resp = await response.json() as ConversationResponse
-    } catch (err) {
-      console.error('create conversation error', err)
-    }
-
-    if (!resp?.result) {
-      throw new ChatError('你的 VPS 或代理可能被封禁,如有疑问,请前往 https://github.com/weaigc/bingo 咨询', ErrorCode.UNKOWN_ERROR)
-    }
-
-    const { value, message } = resp.result || {}
-    if (value !== 'Success') {
-      const errorMsg = `${value}: ${message}`
-      if (value === 'UnauthorizedRequest') {
-        throw new ChatError(errorMsg, ErrorCode.BING_UNAUTHORIZED)
-      }
-      if (value === 'Forbidden') {
-        throw new ChatError(errorMsg, ErrorCode.BING_FORBIDDEN)
-      }
-      throw new ChatError(errorMsg, ErrorCode.UNKOWN_ERROR)
-    }
-    return resp
-  }
-
-  private async createContext(conversationStyle: BingConversationStyle) {
-    if (!this.conversationContext) {
-      const conversation = await this.createConversation()
-      this.conversationContext = {
-        conversationId: conversation.conversationId,
-        conversationSignature: conversation.conversationSignature,
-        clientId: conversation.clientId,
-        invocationId: 0,
-        conversationStyle,
-        prompt: '',
-      }
-    }
-    return this.conversationContext
-  }
-
-  async sendMessage(params: Params) {
-    try {
-      await this.createContext(params.options.bingConversationStyle)
-      Object.assign(this.conversationContext!, { prompt: params.prompt, imageUrl: params.imageUrl })
-      return this.sydneyProxy(params)
-    } catch (error) {
-      params.onEvent({
-        type: 'ERROR',
-        error: error instanceof ChatError ? error : new ChatError('Catch Error', ErrorCode.UNKOWN_ERROR),
-      })
-    }
-  }
-
-  private async sydneyProxy(params: Params) {
-    const abortController = new AbortController()
-    const response = await fetch(this.endpoint + '/api/sydney', {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-      },
-      signal: abortController.signal,
-      body: JSON.stringify(this.conversationContext!)
-    })
-    if (response.status !== 200) {
-      params.onEvent({
-        type: 'ERROR',
-        error: new ChatError(
-          'Unknown error',
-          ErrorCode.UNKOWN_ERROR,
-        ),
-      })
-    }
-    params.signal?.addEventListener('abort', () => {
-      abortController.abort()
-    })
-
-    const textDecoder = createChunkDecoder()
-    for await (const chunk of streamAsyncIterable(response.body!)) {
-      this.parseEvents(params, websocketUtils.unpackMessage(textDecoder(chunk)))
-    }
-  }
-
-  async sendWs() {
-    const wsConfig: ConstructorParameters<typeof WebSocketAsPromised>[1] = {
-      packMessage: websocketUtils.packMessage,
-      unpackMessage: websocketUtils.unpackMessage,
-      createWebSocket: (url) => new WebSocket(url, {
-        headers: {
-          'accept-language': 'zh-CN,zh;q=0.9',
-          'cache-control': 'no-cache',
-          'User-Agent': this.ua,
-          pragma: 'no-cache',
-          cookie: this.cookie,
-        }
-      })
-    }
-    const wsp = new WebSocketAsPromised('wss://sydney.bing.com/sydney/ChatHub', wsConfig)
-
-    wsp.open().then(() => {
-      wsp.sendPacked({ protocol: 'json', version: 1 })
-      wsp.sendPacked({ type: 6 })
-      wsp.sendPacked(BingWebBot.buildChatRequest(this.conversationContext!))
-    })
-
-    return wsp
-  }
-
-  private async useWs(params: Params) {
-    const wsp = await this.sendWs()
-    const watchDog = new WatchDog()
-    wsp.onUnpackedMessage.addListener((events) => {
-      watchDog.watch(() => {
-        wsp.sendPacked({ type: 6 })
-      })
-      this.parseEvents(params, events)
-    })
-
-    wsp.onClose.addListener(() => {
-      watchDog.reset()
-      params.onEvent({ type: 'DONE' })
-      wsp.removeAllListeners()
-    })
-
-    params.signal?.addEventListener('abort', () => {
-      wsp.removeAllListeners()
-      wsp.close()
-    })
-  }
-
-  private async createImage(prompt: string, id: string) {
-    try {
-      const headers = {
-        'Accept-Encoding': 'gzip, deflate, br, zsdch',
-        'User-Agent': this.ua,
-        'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
-        cookie: this.cookie,
-      }
-      const query = new URLSearchParams({
-        prompt,
-        id
-      })
-      const response = await fetch(this.endpoint + '/api/image?' + query.toString(),
-        {
-          method: 'POST',
-          headers,
-          mode: 'cors',
-          credentials: 'include'
-        })
-        .then(res => res.text())
-      if (response) {
-        this.lastText += '\n' + response
-      }
-    } catch (err) {
-      console.error('Create Image Error', err)
-    }
-  }
-
-  private buildKnowledgeApiPayload(imageUrl: string, conversationStyle: BingConversationStyle) {
-    const imageInfo: ImageInfo = {}
-    let imageBase64: string | undefined = undefined
-    const knowledgeRequest = {
-      imageInfo,
-      knowledgeRequest: {
-        invokedSkills: [
-          'ImageById'
-        ],
-        subscriptionId: 'Bing.Chat.Multimodal',
-        invokedSkillsRequestData: {
-          enableFaceBlur: true
-        },
-        convoData: {
-          convoid: this.conversationContext?.conversationId,
-          convotone: conversationStyle,
-        }
-      },
-    }
-
-    if (imageUrl.startsWith('data:image/')) {
-      imageBase64 = imageUrl.replace('data:image/', '');
-      const partIndex = imageBase64.indexOf(',')
-      if (partIndex) {
-        imageBase64 = imageBase64.substring(partIndex + 1)
-      }
-    } else {
-      imageInfo.url = imageUrl
-    }
-    return { knowledgeRequest, imageBase64 }
-  }
-
-  async uploadImage(imageUrl: string, conversationStyle: BingConversationStyle = BingConversationStyle.Creative): Promise<KBlobResponse | undefined> {
-    if (!imageUrl) {
-      return
-    }
-    await this.createContext(conversationStyle)
-    const payload = this.buildKnowledgeApiPayload(imageUrl, conversationStyle)
-
-    const response = await fetch(this.endpoint + '/api/kblob',
-      {
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        method: 'POST',
-        mode: 'cors',
-        credentials: 'include',
-        body: JSON.stringify(payload),
-      })
-      .then(res => res.json())
-      .catch(e => {
-        console.log('Error', e)
-      })
-    return response
-  }
-
-  private async generateContent(message: ChatResponseMessage) {
-    if (message.contentType === 'IMAGE') {
-      this.asyncTasks.push(this.createImage(message.text, message.messageId))
-    }
-  }
-
-  private async parseEvents(params: Params, events: any) {
-    const conversation = this.conversationContext!
-
-    events?.forEach(async (event: ChatUpdateCompleteResponse) => {
-      debug('bing event', event)
-      if (event.type === 3) {
-        await Promise.all(this.asyncTasks)
-        this.asyncTasks = []
-        params.onEvent({ type: 'UPDATE_ANSWER', data: { text: this.lastText } })
-        params.onEvent({ type: 'DONE' })
-        conversation.invocationId = parseInt(event.invocationId, 10) + 1
-      } else if (event.type === 1) {
-        const messages = event.arguments[0].messages
-        if (messages) {
-          const text = convertMessageToMarkdown(messages[0])
-          this.lastText = text
-          params.onEvent({ type: 'UPDATE_ANSWER', data: { text, spokenText: messages[0].text, throttling: event.arguments[0].throttling } })
-        }
-      } else if (event.type === 2) {
-        const messages = event.item.messages as ChatResponseMessage[] | undefined
-        if (!messages) {
-          params.onEvent({
-            type: 'ERROR',
-            error: new ChatError(
-              event.item.result.error || 'Unknown error',
-              event.item.result.value === 'Throttled' ? ErrorCode.THROTTLE_LIMIT
-                : event.item.result.value === 'CaptchaChallenge' ? (this.conversationContext?.conversationId?.includes('BingProdUnAuthenticatedUsers') ? ErrorCode.BING_UNAUTHORIZED : ErrorCode.BING_CAPTCHA)
-                  : ErrorCode.UNKOWN_ERROR
-            ),
-          })
-          return
-        }
-        const limited = messages.some((message) =>
-          message.contentOrigin === 'TurnLimiter'
-          || message.messageType === 'Disengaged'
-        )
-        if (limited) {
-          params.onEvent({
-            type: 'ERROR',
-            error: new ChatError(
-              'Sorry, you have reached chat limit in this conversation.',
-              ErrorCode.CONVERSATION_LIMIT,
-            ),
-          })
-          return
-        }
-
-        const lastMessage = event.item.messages.at(-1) as ChatResponseMessage
-        const specialMessage = event.item.messages.find(message => message.author === 'bot' && message.contentType === 'IMAGE')
-        if (specialMessage) {
-          this.generateContent(specialMessage)
-        }
-
-        if (lastMessage) {
-          const text = convertMessageToMarkdown(lastMessage)
-          this.lastText = text
-          params.onEvent({
-            type: 'UPDATE_ANSWER',
-            data: { text, throttling: event.item.throttling, suggestedResponses: lastMessage.suggestedResponses, sourceAttributions: lastMessage.sourceAttributions },
-          })
-        }
-      }
-    })
-  }
-
-  resetConversation() {
-    this.conversationContext = undefined
-  }
-}
spaces/AB-TW/team-ai/promopts.py
DELETED
@@ -1,57 +0,0 @@
-from langchain.prompts import PromptTemplate
-
-FEEDBACK = """You are a business analyst who is familiar with specification by example. I'm the domain expert.
-
-===CONTEXT
-{context}
-===END OF CONTXT
-
-===USER STORY
-{story}
-===END OF USER STORY
-
-Explain the user story as scenarios. use the following format:
-
-Thought: you should always think about what is still uncertain about the user story. Ignore technical concerns.
-Question: the Question to ask to clarify the user story
-Answer: the answer I responded to the question
-... (this Thought/Question/Answer repeat at least 3 times, at most 10 times)
-Thought: I know enough to explain the user story
-Scenarios: List all possible scenarios with concrete example in Given/When/Then style
-
-Please use Chinese! Begin!
-
-{history}
-Answer:{input}"""
-
-FEEDBACK_PROMPT = PromptTemplate(
-    input_variables=["context", "story", "history", "input"], template=FEEDBACK,
-)
-
-
-agent_template = """You are a business analyst who is familiar with specification by example. Your main task is to explain the user story as scenarios.
-You have access to the following tools:
-
-{tools}
-
-Use the following format:
-
-Story: the story that you need to explain
-Thought: you should always think about what is still uncertain about the user story to Explain the user story. Ignore technical concerns.
-Action: the action to take, should be one of [{tool_names}]
-Action Input: the input to the action
-Observation: the result of the action
-... (this Thought/Action/Action Input/Observation can repeat 10 times)
-Thought: I now know the final answer
-Final Answer: List all possible scenarios with concrete example in Given/When/Then style
-
-Begin!
-
-Story: {input}
-{agent_scratchpad}"""
-
-CONTENT_RE_WRIGHT = """你是一个文案助手,请将如下文案整理重写,去除重复的内容,尽量保留原有信息:
-```
-{input}
-```"""
-CONTENT_RE_WRIGHT_PROMPT = PromptTemplate(input_variables=["input"], template=CONTENT_RE_WRIGHT,)
spaces/AIFILMS/StyleGANEX/datasets/ffhq_degradation_dataset.py
DELETED
@@ -1,235 +0,0 @@
-import cv2
-import math
-import numpy as np
-import os.path as osp
-import torch
-import torch.utils.data as data
-from basicsr.data import degradations as degradations
-from basicsr.data.data_util import paths_from_folder
-from basicsr.data.transforms import augment
-from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
-from basicsr.utils.registry import DATASET_REGISTRY
-from torchvision.transforms.functional import (adjust_brightness, adjust_contrast, adjust_hue, adjust_saturation,
-                                                normalize)
-
-
-@DATASET_REGISTRY.register()
-class FFHQDegradationDataset(data.Dataset):
-    """FFHQ dataset for GFPGAN.
-    It reads high resolution images, and then generate low-quality (LQ) images on-the-fly.
-    Args:
-        opt (dict): Config for train datasets. It contains the following keys:
-            dataroot_gt (str): Data root path for gt.
-            io_backend (dict): IO backend type and other kwarg.
-            mean (list | tuple): Image mean.
-            std (list | tuple): Image std.
-            use_hflip (bool): Whether to horizontally flip.
-        Please see more options in the codes.
-    """
-
-    def __init__(self, opt):
-        super(FFHQDegradationDataset, self).__init__()
-        self.opt = opt
-        # file client (io backend)
-        self.file_client = None
-        self.io_backend_opt = opt['io_backend']
-
-        self.gt_folder = opt['dataroot_gt']
-        self.mean = opt['mean']
-        self.std = opt['std']
-        self.out_size = opt['out_size']
-
-        self.crop_components = opt.get('crop_components', False)  # facial components
-        self.eye_enlarge_ratio = opt.get('eye_enlarge_ratio', 1)  # whether enlarge eye regions
-
-        if self.crop_components:
-            # load component list from a pre-process pth files
-            self.components_list = torch.load(opt.get('component_path'))
-
-        # file client (lmdb io backend)
-        if self.io_backend_opt['type'] == 'lmdb':
-            self.io_backend_opt['db_paths'] = self.gt_folder
-            if not self.gt_folder.endswith('.lmdb'):
-                raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}")
-            with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
-                self.paths = [line.split('.')[0] for line in fin]
-        else:
-            # disk backend: scan file list from a folder
-            self.paths = paths_from_folder(self.gt_folder)
-
-        # degradation configurations
-        self.blur_kernel_size = opt['blur_kernel_size']
-        self.kernel_list = opt['kernel_list']
-        self.kernel_prob = opt['kernel_prob']
-        self.blur_sigma = opt['blur_sigma']
-        self.downsample_range = opt['downsample_range']
-        self.noise_range = opt['noise_range']
-        self.jpeg_range = opt['jpeg_range']
-
-        # color jitter
-        self.color_jitter_prob = opt.get('color_jitter_prob')
-        self.color_jitter_pt_prob = opt.get('color_jitter_pt_prob')
-        self.color_jitter_shift = opt.get('color_jitter_shift', 20)
-        # to gray
-        self.gray_prob = opt.get('gray_prob')
-
-        logger = get_root_logger()
-        logger.info(f'Blur: blur_kernel_size {self.blur_kernel_size}, sigma: [{", ".join(map(str, self.blur_sigma))}]')
-        logger.info(f'Downsample: downsample_range [{", ".join(map(str, self.downsample_range))}]')
-        logger.info(f'Noise: [{", ".join(map(str, self.noise_range))}]')
-        logger.info(f'JPEG compression: [{", ".join(map(str, self.jpeg_range))}]')
-
-        if self.color_jitter_prob is not None:
-            logger.info(f'Use random color jitter. Prob: {self.color_jitter_prob}, shift: {self.color_jitter_shift}')
-        if self.gray_prob is not None:
-            logger.info(f'Use random gray. Prob: {self.gray_prob}')
-        self.color_jitter_shift /= 255.
-
-    @staticmethod
-    def color_jitter(img, shift):
-        """jitter color: randomly jitter the RGB values, in numpy formats"""
-        jitter_val = np.random.uniform(-shift, shift, 3).astype(np.float32)
-        img = img + jitter_val
-        img = np.clip(img, 0, 1)
-        return img
-
-    @staticmethod
-    def color_jitter_pt(img, brightness, contrast, saturation, hue):
-        """jitter color: randomly jitter the brightness, contrast, saturation, and hue, in torch Tensor formats"""
-        fn_idx = torch.randperm(4)
-        for fn_id in fn_idx:
-            if fn_id == 0 and brightness is not None:
-                brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item()
-                img = adjust_brightness(img, brightness_factor)
-
-            if fn_id == 1 and contrast is not None:
-                contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item()
-                img = adjust_contrast(img, contrast_factor)
-
-            if fn_id == 2 and saturation is not None:
-                saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item()
-                img = adjust_saturation(img, saturation_factor)
-
-            if fn_id == 3 and hue is not None:
-                hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()
-                img = adjust_hue(img, hue_factor)
-        return img
-
-    def get_component_coordinates(self, index, status):
-        """Get facial component (left_eye, right_eye, mouth) coordinates from a pre-loaded pth file"""
-        components_bbox = self.components_list[f'{index:08d}']
-        if status[0]:  # hflip
-            # exchange right and left eye
-            tmp = components_bbox['left_eye']
-            components_bbox['left_eye'] = components_bbox['right_eye']
-            components_bbox['right_eye'] = tmp
-            # modify the width coordinate
-            components_bbox['left_eye'][0] = self.out_size - components_bbox['left_eye'][0]
-            components_bbox['right_eye'][0] = self.out_size - components_bbox['right_eye'][0]
-            components_bbox['mouth'][0] = self.out_size - components_bbox['mouth'][0]
-
-        # get coordinates
-        locations = []
-        for part in ['left_eye', 'right_eye', 'mouth']:
-            mean = components_bbox[part][0:2]
-            mean[0] = mean[0] * 2 + 128  ########
-            mean[1] = mean[1] * 2 + 128  ########
-            half_len = components_bbox[part][2] * 2  ########
-            if 'eye' in part:
-                half_len *= self.eye_enlarge_ratio
-            loc = np.hstack((mean - half_len + 1, mean + half_len))
-            loc = torch.from_numpy(loc).float()
-            locations.append(loc)
-        return locations
-
-    def __getitem__(self, index):
-        if self.file_client is None:
-            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
-
-        # load gt image
-        # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
-        gt_path = self.paths[index]
-        img_bytes = self.file_client.get(gt_path)
-        img_gt = imfrombytes(img_bytes, float32=True)
-
-        # random horizontal flip
-        img_gt, status = augment(img_gt, hflip=self.opt['use_hflip'], rotation=False, return_status=True)
-        h, w, _ = img_gt.shape
-
-        # get facial component coordinates
-        if self.crop_components:
-            locations = self.get_component_coordinates(index, status)
-            loc_left_eye, loc_right_eye, loc_mouth = locations
-
-        # ------------------------ generate lq image ------------------------ #
-        # blur
-        kernel = degradations.random_mixed_kernels(
-            self.kernel_list,
-            self.kernel_prob,
-            self.blur_kernel_size,
-            self.blur_sigma,
-            self.blur_sigma, [-math.pi, math.pi],
-            noise_range=None)
-        img_lq = cv2.filter2D(img_gt, -1, kernel)
-        # downsample
-        scale = np.random.uniform(self.downsample_range[0], self.downsample_range[1])
-        img_lq = cv2.resize(img_lq, (int(w // scale), int(h // scale)), interpolation=cv2.INTER_LINEAR)
-        # noise
-        if self.noise_range is not None:
-            img_lq = degradations.random_add_gaussian_noise(img_lq, self.noise_range)
-        # jpeg compression
-        if self.jpeg_range is not None:
-            img_lq = degradations.random_add_jpg_compression(img_lq, self.jpeg_range)
-
-        # resize to original size
-        img_lq = cv2.resize(img_lq, (int(w // self.opt['scale']), int(h // self.opt['scale'])), interpolation=cv2.INTER_LINEAR)
-
-        # random color jitter (only for lq)
-        if self.color_jitter_prob is not None and (np.random.uniform() < self.color_jitter_prob):
-            img_lq = self.color_jitter(img_lq, self.color_jitter_shift)
-        # random to gray (only for lq)
-        if self.gray_prob and np.random.uniform() < self.gray_prob:
-            img_lq = cv2.cvtColor(img_lq, cv2.COLOR_BGR2GRAY)
-            img_lq = np.tile(img_lq[:, :, None], [1, 1, 3])
-            if self.opt.get('gt_gray'):  # whether convert GT to gray images
-                img_gt = cv2.cvtColor(img_gt, cv2.COLOR_BGR2GRAY)
-                img_gt = np.tile(img_gt[:, :, None], [1, 1, 3])  # repeat the color channels
-
-        # BGR to RGB, HWC to CHW, numpy to tensor
-        #img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)
-        img_gt = img2tensor(img_gt, bgr2rgb=True, float32=True)
-        img_lq = img2tensor(img_lq, bgr2rgb=True, float32=True)
-
-        # random color jitter (pytorch version) (only for lq)
-        if self.color_jitter_pt_prob is not None and (np.random.uniform() < self.color_jitter_pt_prob):
-            brightness = self.opt.get('brightness', (0.5, 1.5))
-            contrast = self.opt.get('contrast', (0.5, 1.5))
-            saturation = self.opt.get('saturation', (0, 1.5))
-            hue = self.opt.get('hue', (-0.1, 0.1))
-            img_lq = self.color_jitter_pt(img_lq, brightness, contrast, saturation, hue)
-
-        # round and clip
-        img_lq = torch.clamp((img_lq * 255.0).round(), 0, 255) / 255.
-
-        # normalize
-        normalize(img_gt, self.mean, self.std, inplace=True)
-        normalize(img_lq, self.mean, self.std, inplace=True)
-
-        '''
-        if self.crop_components:
-            return_dict = {
-                'lq': img_lq,
-                'gt': img_gt,
-                'gt_path': gt_path,
-                'loc_left_eye': loc_left_eye,
-                'loc_right_eye': loc_right_eye,
-                'loc_mouth': loc_mouth
-            }
-            return return_dict
-        else:
-            return {'lq': img_lq, 'gt': img_gt, 'gt_path': gt_path}
-        '''
-        return img_lq, img_gt
-
-    def __len__(self):
-        return len(self.paths)
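FFHQDegradationDataset is configured entirely through the opt dict read in __init__. A minimal sketch of instantiating it, assuming the module above were importable; the paths and degradation ranges below are illustrative guesses, not values taken from this Space:

from ffhq_degradation_dataset import FFHQDegradationDataset

# Hypothetical configuration; the keys mirror those consumed in __init__/__getitem__.
opt = {
    'dataroot_gt': 'datasets/ffhq512',   # illustrative folder of ground-truth face crops
    'io_backend': {'type': 'disk'},
    'mean': [0.5, 0.5, 0.5],
    'std': [0.5, 0.5, 0.5],
    'out_size': 512,
    'use_hflip': True,
    'scale': 1,                          # keep the LQ image at the GT resolution
    'blur_kernel_size': 41,
    'kernel_list': ['iso', 'aniso'],
    'kernel_prob': [0.5, 0.5],
    'blur_sigma': [0.1, 10],
    'downsample_range': [0.8, 8],
    'noise_range': [0, 20],
    'jpeg_range': [60, 100],
}
dataset = FFHQDegradationDataset(opt)
img_lq, img_gt = dataset[0]              # normalized CHW tensors: degraded input, clean target
print(img_lq.shape, img_gt.shape)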
spaces/AIZero2HeroBootcamp/FastSpeech2LinerGradioApp/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: FastSpeech2LinerGradioApp
-emoji: 🌖
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/__init__.py
DELETED
@@ -1,20 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-# flake8: noqa
-from .conv import (
-    NormConv1d,
-    NormConv2d,
-    NormConvTranspose1d,
-    NormConvTranspose2d,
-    StreamableConv1d,
-    StreamableConvTranspose1d,
-    pad_for_conv1d,
-    pad1d,
-    unpad1d,
-)
-from .lstm import StreamableLSTM
-from .seanet import SEANetEncoder, SEANetDecoder
spaces/Abhaykoul/Palm-2/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Palm 2
-emoji: 👁
-colorFrom: green
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.27.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Adapter/T2I-Adapter/dist_util.py
DELETED
@@ -1,91 +0,0 @@
-# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py # noqa: E501
-import functools
-import os
-import subprocess
-import torch
-import torch.distributed as dist
-import torch.multiprocessing as mp
-from torch.nn.parallel import DataParallel, DistributedDataParallel
-
-
-def init_dist(launcher, backend='nccl', **kwargs):
-    if mp.get_start_method(allow_none=True) is None:
-        mp.set_start_method('spawn')
-    if launcher == 'pytorch':
-        _init_dist_pytorch(backend, **kwargs)
-    elif launcher == 'slurm':
-        _init_dist_slurm(backend, **kwargs)
-    else:
-        raise ValueError(f'Invalid launcher type: {launcher}')
-
-
-def _init_dist_pytorch(backend, **kwargs):
-    rank = int(os.environ['RANK'])
-    num_gpus = torch.cuda.device_count()
-    torch.cuda.set_device(rank % num_gpus)
-    dist.init_process_group(backend=backend, **kwargs)
-
-
-def _init_dist_slurm(backend, port=None):
-    """Initialize slurm distributed training environment.
-
-    If argument ``port`` is not specified, then the master port will be system
-    environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
-    environment variable, then a default port ``29500`` will be used.
-
-    Args:
-        backend (str): Backend of torch.distributed.
-        port (int, optional): Master port. Defaults to None.
-    """
-    proc_id = int(os.environ['SLURM_PROCID'])
-    ntasks = int(os.environ['SLURM_NTASKS'])
-    node_list = os.environ['SLURM_NODELIST']
-    num_gpus = torch.cuda.device_count()
-    torch.cuda.set_device(proc_id % num_gpus)
-    addr = subprocess.getoutput(f'scontrol show hostname {node_list} | head -n1')
-    # specify master port
-    if port is not None:
-        os.environ['MASTER_PORT'] = str(port)
-    elif 'MASTER_PORT' in os.environ:
-        pass  # use MASTER_PORT in the environment variable
-    else:
-        # 29500 is torch.distributed default port
-        os.environ['MASTER_PORT'] = '29500'
-    os.environ['MASTER_ADDR'] = addr
-    os.environ['WORLD_SIZE'] = str(ntasks)
-    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
-    os.environ['RANK'] = str(proc_id)
-    dist.init_process_group(backend=backend)
-
-
-def get_dist_info():
-    if dist.is_available():
-        initialized = dist.is_initialized()
-    else:
-        initialized = False
-    if initialized:
-        rank = dist.get_rank()
-        world_size = dist.get_world_size()
-    else:
-        rank = 0
-        world_size = 1
-    return rank, world_size
-
-
-def master_only(func):
-
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        rank, _ = get_dist_info()
-        if rank == 0:
-            return func(*args, **kwargs)
-
-    return wrapper
-
-def get_bare_model(net):
-    """Get bare model, especially under wrapping with
-    DistributedDataParallel or DataParallel.
-    """
-    if isinstance(net, (DataParallel, DistributedDataParallel)):
-        net = net.module
-    return net
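These helpers are typically combined once at the top of a training script. A minimal sketch, assuming the module above were importable as dist_util and the job is launched with torchrun (so RANK and friends are set); the message text is illustrative:

import torch.distributed as dist

from dist_util import get_dist_info, init_dist, master_only


@master_only
def log_once(msg):
    # Runs only on rank 0, so multi-GPU jobs do not print duplicate lines.
    print(msg)


def main():
    # 'pytorch' matches torchrun / torch.distributed.launch; 'slurm' covers srun jobs.
    init_dist('pytorch', backend='nccl')
    rank, world_size = get_dist_info()
    log_once(f'initialized {world_size} processes')
    dist.barrier()  # simple sanity check that every rank reached this point


if __name__ == '__main__':
    main()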
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/Factory.d.ts
DELETED
@@ -1,5 +0,0 @@
-import Knob from './Knob';
-
-export default function (
-    config?: Knob.IConfig
-): Knob;
spaces/AlekseyKorshuk/thin-plate-spline-motion-model/train_avd.py
DELETED
@@ -1,91 +0,0 @@
-from tqdm import trange
-import torch
-from torch.utils.data import DataLoader
-from logger import Logger
-from torch.optim.lr_scheduler import MultiStepLR
-from frames_dataset import DatasetRepeater
-
-
-def random_scale(kp_params, scale):
-    theta = torch.rand(kp_params['fg_kp'].shape[0], 2) * (2 * scale) + (1 - scale)
-    theta = torch.diag_embed(theta).unsqueeze(1).type(kp_params['fg_kp'].type())
-    new_kp_params = {'fg_kp': torch.matmul(theta, kp_params['fg_kp'].unsqueeze(-1)).squeeze(-1)}
-    return new_kp_params
-
-
-def train_avd(config, inpainting_network, kp_detector, bg_predictor, dense_motion_network,
-              avd_network, checkpoint, log_dir, dataset):
-    train_params = config['train_avd_params']
-
-    optimizer = torch.optim.Adam(avd_network.parameters(), lr=train_params['lr'], betas=(0.5, 0.999))
-
-    if checkpoint is not None:
-        Logger.load_cpk(checkpoint, inpainting_network=inpainting_network, kp_detector=kp_detector,
-                        bg_predictor=bg_predictor, avd_network=avd_network,
-                        dense_motion_network= dense_motion_network,optimizer_avd=optimizer)
-        start_epoch = 0
-    else:
-        raise AttributeError("Checkpoint should be specified for mode='train_avd'.")
-
-    scheduler = MultiStepLR(optimizer, train_params['epoch_milestones'], gamma=0.1)
-
-    if 'num_repeats' in train_params or train_params['num_repeats'] != 1:
-        dataset = DatasetRepeater(dataset, train_params['num_repeats'])
-
-    dataloader = DataLoader(dataset, batch_size=train_params['batch_size'], shuffle=True,
-                            num_workers=train_params['dataloader_workers'], drop_last=True)
-
-    with Logger(log_dir=log_dir, visualizer_params=config['visualizer_params'],
-                checkpoint_freq=train_params['checkpoint_freq']) as logger:
-        for epoch in trange(start_epoch, train_params['num_epochs']):
-            avd_network.train()
-            for x in dataloader:
-                with torch.no_grad():
-                    kp_source = kp_detector(x['source'].cuda())
-                    kp_driving_gt = kp_detector(x['driving'].cuda())
-                    kp_driving_random = random_scale(kp_driving_gt, scale=train_params['random_scale'])
-                rec = avd_network(kp_source, kp_driving_random)
-
-                reconstruction_kp = train_params['lambda_shift'] * \
-                    torch.abs(kp_driving_gt['fg_kp'] - rec['fg_kp']).mean()
-
-                loss_dict = {'rec_kp': reconstruction_kp}
-                loss = reconstruction_kp
-
-                loss.backward()
-                optimizer.step()
-                optimizer.zero_grad()
-
-                losses = {key: value.mean().detach().data.cpu().numpy() for key, value in loss_dict.items()}
-                logger.log_iter(losses=losses)
-
-            # Visualization
-            avd_network.eval()
-            with torch.no_grad():
-                source = x['source'][:6].cuda()
-                driving = torch.cat([x['driving'][[0, 1]].cuda(), source[[2, 3, 2, 1]]], dim=0)
-                kp_source = kp_detector(source)
-                kp_driving = kp_detector(driving)
-
-                out = avd_network(kp_source, kp_driving)
-                kp_driving = out
-                dense_motion = dense_motion_network(source_image=source, kp_driving=kp_driving,
-                                                    kp_source=kp_source)
-                generated = inpainting_network(source, dense_motion)
-
-                generated.update({'kp_source': kp_source, 'kp_driving': kp_driving})
-
-            scheduler.step(epoch)
-            model_save = {
-                'inpainting_network': inpainting_network,
-                'dense_motion_network': dense_motion_network,
-                'kp_detector': kp_detector,
-                'avd_network': avd_network,
-                'optimizer_avd': optimizer
-            }
-            if bg_predictor :
-                model_save['bg_predictor'] = bg_predictor
-
-            logger.log_epoch(epoch, model_save,
-                             inp={'source': source, 'driving': driving},
-                             out=generated)
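random_scale draws one scale factor per axis and per batch element in [1 - scale, 1 + scale] and applies it to every keypoint of that sample. A small standalone check of that behaviour; the function body is copied from the diff above and the tensor shapes are made up:

import torch

def random_scale(kp_params, scale):
    theta = torch.rand(kp_params['fg_kp'].shape[0], 2) * (2 * scale) + (1 - scale)
    theta = torch.diag_embed(theta).unsqueeze(1).type(kp_params['fg_kp'].type())
    return {'fg_kp': torch.matmul(theta, kp_params['fg_kp'].unsqueeze(-1)).squeeze(-1)}

kp = {'fg_kp': torch.randn(4, 10, 2)}            # batch of 4 samples, 10 keypoints, (x, y)
scaled = random_scale(kp, scale=0.25)             # each axis scaled by a factor in [0.75, 1.25]
print(scaled['fg_kp'].shape)                      # torch.Size([4, 10, 2])
ratio = scaled['fg_kp'] / kp['fg_kp']
print(torch.allclose(ratio[0, :, 0], ratio[0, :1, 0].expand(10), atol=1e-5))  # True: one shared x-scale per sample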
spaces/AlexWang/lama/saicinpainting/training/losses/__init__.py
DELETED
File without changes
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
-_base_ = './fcn_d6_r50-d16_769x769_80k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py
DELETED
@@ -1,7 +0,0 @@
-_base_ = [
-    '../_base_/models/psanet_r50-d8.py',
-    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
-    '../_base_/schedules/schedule_20k.py'
-]
-model = dict(
-    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/parrots_jit.py
DELETED
@@ -1,41 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import os
-
-from .parrots_wrapper import TORCH_VERSION
-
-parrots_jit_option = os.getenv('PARROTS_JIT_OPTION')
-
-if TORCH_VERSION == 'parrots' and parrots_jit_option == 'ON':
-    from parrots.jit import pat as jit
-else:
-
-    def jit(func=None,
-            check_input=None,
-            full_shape=True,
-            derivate=False,
-            coderize=False,
-            optimize=False):
-
-        def wrapper(func):
-
-            def wrapper_inner(*args, **kargs):
-                return func(*args, **kargs)
-
-            return wrapper_inner
-
-        if func is None:
-            return wrapper
-        else:
-            return func
-
-
-if TORCH_VERSION == 'parrots':
-    from parrots.utils.tester import skip_no_elena
-else:
-
-    def skip_no_elena(func):
-
-        def wrapper(*args, **kargs):
-            return func(*args, **kargs)
-
-        return wrapper
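Outside the parrots build, jit falls back to a decorator that does nothing, but it still has to work both as @jit and as @jit(...). A small standalone sketch of that fallback pattern, independent of mmcv; the decorated function names are illustrative:

def jit(func=None, check_input=None, full_shape=True,
        derivate=False, coderize=False, optimize=False):
    # Fallback used when the parrots JIT is unavailable: decorating changes nothing.
    def wrapper(func):
        def wrapper_inner(*args, **kargs):
            return func(*args, **kargs)
        return wrapper_inner
    if func is None:
        return wrapper   # called as @jit(...): return a decorator
    return func          # called as @jit: return the function unchanged


@jit
def add(a, b):
    return a + b


@jit(full_shape=False)
def mul(a, b):
    return a * b


print(add(2, 3), mul(2, 3))  # 5 6 -- both behave exactly like the undecorated functions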
spaces/Apex-X/ROOPOK/roop/face_analyser.py
DELETED
@@ -1,54 +0,0 @@
-import threading
-from typing import Any, Optional, List
-import insightface
-import numpy
-
-import roop.globals
-from roop.typing import Frame, Face
-
-FACE_ANALYSER = None
-THREAD_LOCK = threading.Lock()
-
-
-def get_face_analyser() -> Any:
-    global FACE_ANALYSER
-
-    with THREAD_LOCK:
-        if FACE_ANALYSER is None:
-            FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.execution_providers)
-            FACE_ANALYSER.prepare(ctx_id=0)
-    return FACE_ANALYSER
-
-
-def clear_face_analyser() -> Any:
-    global FACE_ANALYSER
-
-    FACE_ANALYSER = None
-
-
-def get_one_face(frame: Frame, position: int = 0) -> Optional[Face]:
-    many_faces = get_many_faces(frame)
-    if many_faces:
-        try:
-            return many_faces[position]
-        except IndexError:
-            return many_faces[-1]
-    return None
-
-
-def get_many_faces(frame: Frame) -> Optional[List[Face]]:
-    try:
-        return get_face_analyser().get(frame)
-    except ValueError:
-        return None
-
-
-def find_similar_face(frame: Frame, reference_face: Face) -> Optional[Face]:
-    many_faces = get_many_faces(frame)
-    if many_faces:
-        for face in many_faces:
-            if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
-                distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding))
-                if distance < roop.globals.similar_face_distance:
-                    return face
-    return None
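The module lazily builds a single, lock-protected insightface FaceAnalysis instance and exposes small helpers on top of it. A minimal usage sketch, assuming the roop package from this Space is installed; the image path is purely illustrative:

import cv2

from roop.face_analyser import get_many_faces, get_one_face

frame = cv2.imread('group_photo.jpg')    # hypothetical BGR image on disk
faces = get_many_faces(frame) or []
print(f'detected {len(faces)} faces')

face = get_one_face(frame)                # first detected face, or None if nothing was found
if face is not None:
    print(face.bbox)                      # insightface faces expose bbox, kps, normed_embedding, ...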
spaces/ArkanDash/rvc-models-new/lib/infer_pack/attentions.py
DELETED
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from lib.infer_pack import commons
-from lib.infer_pack import modules
-from lib.infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
-    def __init__(
-        self,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size=1,
-        p_dropout=0.0,
-        window_size=10,
-        **kwargs
-    ):
-        super().__init__()
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.window_size = window_size
-
-        self.drop = nn.Dropout(p_dropout)
-        self.attn_layers = nn.ModuleList()
-        self.norm_layers_1 = nn.ModuleList()
-        self.ffn_layers = nn.ModuleList()
-        self.norm_layers_2 = nn.ModuleList()
-        for i in range(self.n_layers):
-            self.attn_layers.append(
-                MultiHeadAttention(
-                    hidden_channels,
-                    hidden_channels,
-                    n_heads,
-                    p_dropout=p_dropout,
-                    window_size=window_size,
-                )
-            )
-            self.norm_layers_1.append(LayerNorm(hidden_channels))
-            self.ffn_layers.append(
-                FFN(
-                    hidden_channels,
-                    hidden_channels,
-                    filter_channels,
-                    kernel_size,
-                    p_dropout=p_dropout,
-                )
-            )
-            self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-    def forward(self, x, x_mask):
-        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-        x = x * x_mask
-        for i in range(self.n_layers):
-            y = self.attn_layers[i](x, x, attn_mask)
-            y = self.drop(y)
-            x = self.norm_layers_1[i](x + y)
-
-            y = self.ffn_layers[i](x, x_mask)
-            y = self.drop(y)
-            x = self.norm_layers_2[i](x + y)
-        x = x * x_mask
-        return x
-
-
-class Decoder(nn.Module):
-    def __init__(
-        self,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size=1,
-        p_dropout=0.0,
-        proximal_bias=False,
-        proximal_init=True,
-        **kwargs
-    ):
-        super().__init__()
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.proximal_bias = proximal_bias
-        self.proximal_init = proximal_init
-
-        self.drop = nn.Dropout(p_dropout)
-        self.self_attn_layers = nn.ModuleList()
-        self.norm_layers_0 = nn.ModuleList()
-        self.encdec_attn_layers = nn.ModuleList()
-        self.norm_layers_1 = nn.ModuleList()
-        self.ffn_layers = nn.ModuleList()
-        self.norm_layers_2 = nn.ModuleList()
-        for i in range(self.n_layers):
-            self.self_attn_layers.append(
-                MultiHeadAttention(
-                    hidden_channels,
-                    hidden_channels,
-                    n_heads,
-                    p_dropout=p_dropout,
-                    proximal_bias=proximal_bias,
-                    proximal_init=proximal_init,
-                )
-            )
-            self.norm_layers_0.append(LayerNorm(hidden_channels))
-            self.encdec_attn_layers.append(
-                MultiHeadAttention(
-                    hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
-                )
-            )
-            self.norm_layers_1.append(LayerNorm(hidden_channels))
-            self.ffn_layers.append(
-                FFN(
-                    hidden_channels,
-                    hidden_channels,
-                    filter_channels,
-                    kernel_size,
-                    p_dropout=p_dropout,
-                    causal=True,
-                )
-            )
-            self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-    def forward(self, x, x_mask, h, h_mask):
-        """
-        x: decoder input
-        h: encoder output
-        """
-        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
-            device=x.device, dtype=x.dtype
-        )
-        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-        x = x * x_mask
-        for i in range(self.n_layers):
-            y = self.self_attn_layers[i](x, x, self_attn_mask)
-            y = self.drop(y)
-            x = self.norm_layers_0[i](x + y)
-
-            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
-            y = self.drop(y)
-            x = self.norm_layers_1[i](x + y)
-
-            y = self.ffn_layers[i](x, x_mask)
-            y = self.drop(y)
-            x = self.norm_layers_2[i](x + y)
-        x = x * x_mask
-        return x
-
-
-class MultiHeadAttention(nn.Module):
-    def __init__(
-        self,
-        channels,
-        out_channels,
-        n_heads,
-        p_dropout=0.0,
-        window_size=None,
-        heads_share=True,
-        block_length=None,
-        proximal_bias=False,
-        proximal_init=False,
-    ):
-        super().__init__()
-        assert channels % n_heads == 0
-
-        self.channels = channels
-        self.out_channels = out_channels
-        self.n_heads = n_heads
-        self.p_dropout = p_dropout
-        self.window_size = window_size
-        self.heads_share = heads_share
-        self.block_length = block_length
-        self.proximal_bias = proximal_bias
-        self.proximal_init = proximal_init
-        self.attn = None
-
-        self.k_channels = channels // n_heads
-        self.conv_q = nn.Conv1d(channels, channels, 1)
-        self.conv_k = nn.Conv1d(channels, channels, 1)
-        self.conv_v = nn.Conv1d(channels, channels, 1)
-        self.conv_o = nn.Conv1d(channels, out_channels, 1)
-        self.drop = nn.Dropout(p_dropout)
-
-        if window_size is not None:
-            n_heads_rel = 1 if heads_share else n_heads
-            rel_stddev = self.k_channels**-0.5
-            self.emb_rel_k = nn.Parameter(
-                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
-                * rel_stddev
-            )
-            self.emb_rel_v = nn.Parameter(
-                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
-                * rel_stddev
-            )
-
-        nn.init.xavier_uniform_(self.conv_q.weight)
-        nn.init.xavier_uniform_(self.conv_k.weight)
-        nn.init.xavier_uniform_(self.conv_v.weight)
-        if proximal_init:
-            with torch.no_grad():
-                self.conv_k.weight.copy_(self.conv_q.weight)
-                self.conv_k.bias.copy_(self.conv_q.bias)
-
-    def forward(self, x, c, attn_mask=None):
-        q = self.conv_q(x)
-        k = self.conv_k(c)
-        v = self.conv_v(c)
-
-        x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
-        x = self.conv_o(x)
-        return x
-
-    def attention(self, query, key, value, mask=None):
-        # reshape [b, d, t] -> [b, n_h, t, d_k]
-        b, d, t_s, t_t = (*key.size(), query.size(2))
-        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
-        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
-        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
-        if self.window_size is not None:
-            assert (
-                t_s == t_t
-            ), "Relative attention is only available for self-attention."
-            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
-            rel_logits = self._matmul_with_relative_keys(
-                query / math.sqrt(self.k_channels), key_relative_embeddings
-            )
-            scores_local = self._relative_position_to_absolute_position(rel_logits)
-            scores = scores + scores_local
-        if self.proximal_bias:
-            assert t_s == t_t, "Proximal bias is only available for self-attention."
-            scores = scores + self._attention_bias_proximal(t_s).to(
-                device=scores.device, dtype=scores.dtype
-            )
-        if mask is not None:
-            scores = scores.masked_fill(mask == 0, -1e4)
-            if self.block_length is not None:
-                assert (
-                    t_s == t_t
-                ), "Local attention is only available for self-attention."
-                block_mask = (
-                    torch.ones_like(scores)
-                    .triu(-self.block_length)
-                    .tril(self.block_length)
-                )
-                scores = scores.masked_fill(block_mask == 0, -1e4)
-        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
-        p_attn = self.drop(p_attn)
-        output = torch.matmul(p_attn, value)
-        if self.window_size is not None:
-            relative_weights = self._absolute_position_to_relative_position(p_attn)
-            value_relative_embeddings = self._get_relative_embeddings(
-                self.emb_rel_v, t_s
-            )
-            output = output + self._matmul_with_relative_values(
-                relative_weights, value_relative_embeddings
-            )
-        output = (
-            output.transpose(2, 3).contiguous().view(b, d, t_t)
-        )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
-        return output, p_attn
-
-    def _matmul_with_relative_values(self, x, y):
-        """
-        x: [b, h, l, m]
-        y: [h or 1, m, d]
-        ret: [b, h, l, d]
-        """
-        ret = torch.matmul(x, y.unsqueeze(0))
-        return ret
-
-    def _matmul_with_relative_keys(self, x, y):
-        """
-        x: [b, h, l, d]
-        y: [h or 1, m, d]
-        ret: [b, h, l, m]
-        """
-        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
-        return ret
-
-    def _get_relative_embeddings(self, relative_embeddings, length):
-        max_relative_position = 2 * self.window_size + 1
-        # Pad first before slice to avoid using cond ops.
-        pad_length = max(length - (self.window_size + 1), 0)
-        slice_start_position = max((self.window_size + 1) - length, 0)
-        slice_end_position = slice_start_position + 2 * length - 1
-        if pad_length > 0:
-            padded_relative_embeddings = F.pad(
-                relative_embeddings,
-                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
-            )
-        else:
-            padded_relative_embeddings = relative_embeddings
-        used_relative_embeddings = padded_relative_embeddings[
-            :, slice_start_position:slice_end_position
-        ]
-        return used_relative_embeddings
-
-    def _relative_position_to_absolute_position(self, x):
-        """
-        x: [b, h, l, 2*l-1]
-        ret: [b, h, l, l]
-        """
-        batch, heads, length, _ = x.size()
-        # Concat columns of pad to shift from relative to absolute indexing.
-        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
-        # Concat extra elements so to add up to shape (len+1, 2*len-1).
-        x_flat = x.view([batch, heads, length * 2 * length])
-        x_flat = F.pad(
-            x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
-        )
-
-        # Reshape and slice out the padded elements.
-        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
-            :, :, :length, length - 1 :
-        ]
-        return x_final
-
-    def _absolute_position_to_relative_position(self, x):
-        """
-        x: [b, h, l, l]
-        ret: [b, h, l, 2*l-1]
-        """
-        batch, heads, length, _ = x.size()
-        # padd along column
-        x = F.pad(
-            x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
-        )
-        x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
-        # add 0's in the beginning that will skew the elements after reshape
-        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
-        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
-        return x_final
-
-    def _attention_bias_proximal(self, length):
-        """Bias for self-attention to encourage attention to close positions.
-        Args:
-            length: an integer scalar.
-        Returns:
-            a Tensor with shape [1, 1, length, length]
-        """
-        r = torch.arange(length, dtype=torch.float32)
-        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
-        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
-    def __init__(
-        self,
-        in_channels,
-        out_channels,
-        filter_channels,
-        kernel_size,
-        p_dropout=0.0,
-        activation=None,
-        causal=False,
-    ):
-        super().__init__()
-        self.in_channels = in_channels
-        self.out_channels = out_channels
-        self.filter_channels = filter_channels
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.activation = activation
-        self.causal = causal
-
-        if causal:
-            self.padding = self._causal_padding
-        else:
-            self.padding = self._same_padding
-
-        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
-        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
-        self.drop = nn.Dropout(p_dropout)
-
-    def forward(self, x, x_mask):
-        x = self.conv_1(self.padding(x * x_mask))
-        if self.activation == "gelu":
-            x = x * torch.sigmoid(1.702 * x)
-        else:
-            x = torch.relu(x)
-        x = self.drop(x)
-        x = self.conv_2(self.padding(x * x_mask))
-        return x * x_mask
-
-    def _causal_padding(self, x):
-        if self.kernel_size == 1:
-            return x
-        pad_l = self.kernel_size - 1
-        pad_r = 0
-        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-        x = F.pad(x, commons.convert_pad_shape(padding))
-        return x
-
-    def _same_padding(self, x):
-        if self.kernel_size == 1:
-            return x
-        pad_l = (self.kernel_size - 1) // 2
-        pad_r = self.kernel_size // 2
-        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-        x = F.pad(x, commons.convert_pad_shape(padding))
-        return x
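A quick smoke test of the Encoder makes the expected tensor layout concrete: the input is [batch, hidden_channels, time] plus a [batch, 1, time] mask, and the output keeps the same shape. This assumes the Space's lib.infer_pack package is importable; the sizes below are arbitrary:

import torch

from lib.infer_pack.attentions import Encoder

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1)
x = torch.randn(1, 192, 100)        # [batch, hidden_channels, time]
x_mask = torch.ones(1, 1, 100)      # 1 for valid frames, 0 for padding
with torch.no_grad():
    y = enc(x, x_mask)
print(y.shape)                       # torch.Size([1, 192, 100])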
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/unet_blocks.py
DELETED
@@ -1,588 +0,0 @@
-# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py
-
-import torch
-from torch import nn
-
-from .attention import Transformer3DModel
-from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
-
-
-def get_down_block(
-    down_block_type,
-    num_layers,
-    in_channels,
-    out_channels,
-    temb_channels,
-    add_downsample,
-    resnet_eps,
-    resnet_act_fn,
-    attn_num_head_channels,
-    resnet_groups=None,
-    cross_attention_dim=None,
-    downsample_padding=None,
-    dual_cross_attention=False,
-    use_linear_projection=False,
-    only_cross_attention=False,
-    upcast_attention=False,
-    resnet_time_scale_shift="default",
-):
-    down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
-    if down_block_type == "DownBlock3D":
-        return DownBlock3D(
-            num_layers=num_layers,
-            in_channels=in_channels,
-            out_channels=out_channels,
-            temb_channels=temb_channels,
-            add_downsample=add_downsample,
-            resnet_eps=resnet_eps,
-            resnet_act_fn=resnet_act_fn,
-            resnet_groups=resnet_groups,
-            downsample_padding=downsample_padding,
-            resnet_time_scale_shift=resnet_time_scale_shift,
-        )
-    elif down_block_type == "CrossAttnDownBlock3D":
-        if cross_attention_dim is None:
-            raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
-        return CrossAttnDownBlock3D(
-            num_layers=num_layers,
-            in_channels=in_channels,
-            out_channels=out_channels,
-            temb_channels=temb_channels,
-            add_downsample=add_downsample,
-            resnet_eps=resnet_eps,
-            resnet_act_fn=resnet_act_fn,
-            resnet_groups=resnet_groups,
-            downsample_padding=downsample_padding,
-            cross_attention_dim=cross_attention_dim,
-            attn_num_head_channels=attn_num_head_channels,
-            dual_cross_attention=dual_cross_attention,
-            use_linear_projection=use_linear_projection,
-            only_cross_attention=only_cross_attention,
-            upcast_attention=upcast_attention,
-            resnet_time_scale_shift=resnet_time_scale_shift,
-        )
-    raise ValueError(f"{down_block_type} does not exist.")
-
-
-def get_up_block(
-    up_block_type,
-    num_layers,
-    in_channels,
-    out_channels,
-    prev_output_channel,
-    temb_channels,
-    add_upsample,
-    resnet_eps,
-    resnet_act_fn,
-    attn_num_head_channels,
-    resnet_groups=None,
-    cross_attention_dim=None,
-    dual_cross_attention=False,
-    use_linear_projection=False,
-    only_cross_attention=False,
-    upcast_attention=False,
-    resnet_time_scale_shift="default",
-):
-    up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
-    if up_block_type == "UpBlock3D":
-        return UpBlock3D(
-            num_layers=num_layers,
-            in_channels=in_channels,
-            out_channels=out_channels,
-            prev_output_channel=prev_output_channel,
-            temb_channels=temb_channels,
-            add_upsample=add_upsample,
-            resnet_eps=resnet_eps,
-            resnet_act_fn=resnet_act_fn,
-            resnet_groups=resnet_groups,
-            resnet_time_scale_shift=resnet_time_scale_shift,
-        )
-    elif up_block_type == "CrossAttnUpBlock3D":
-        if cross_attention_dim is None:
-            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
-        return CrossAttnUpBlock3D(
-            num_layers=num_layers,
-            in_channels=in_channels,
-            out_channels=out_channels,
-            prev_output_channel=prev_output_channel,
-            temb_channels=temb_channels,
-            add_upsample=add_upsample,
-            resnet_eps=resnet_eps,
-            resnet_act_fn=resnet_act_fn,
-            resnet_groups=resnet_groups,
-            cross_attention_dim=cross_attention_dim,
-            attn_num_head_channels=attn_num_head_channels,
-            dual_cross_attention=dual_cross_attention,
-            use_linear_projection=use_linear_projection,
-            only_cross_attention=only_cross_attention,
-            upcast_attention=upcast_attention,
-            resnet_time_scale_shift=resnet_time_scale_shift,
-        )
-    raise ValueError(f"{up_block_type} does not exist.")
-
-
-class UNetMidBlock3DCrossAttn(nn.Module):
-    def __init__(
-        self,
-        in_channels: int,
-        temb_channels: int,
-        dropout: float = 0.0,
-        num_layers: int = 1,
-        resnet_eps: float = 1e-6,
-        resnet_time_scale_shift: str = "default",
-        resnet_act_fn: str = "swish",
-        resnet_groups: int = 32,
-        resnet_pre_norm: bool = True,
-        attn_num_head_channels=1,
-        output_scale_factor=1.0,
-        cross_attention_dim=1280,
-        dual_cross_attention=False,
-        use_linear_projection=False,
-        upcast_attention=False,
-    ):
-        super().__init__()
-
-        self.has_cross_attention = True
-        self.attn_num_head_channels = attn_num_head_channels
-        resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
-
-        # there is always at least one resnet
-        resnets = [
-            ResnetBlock3D(
-                in_channels=in_channels,
-                out_channels=in_channels,
-                temb_channels=temb_channels,
-                eps=resnet_eps,
-                groups=resnet_groups,
-                dropout=dropout,
-                time_embedding_norm=resnet_time_scale_shift,
-                non_linearity=resnet_act_fn,
-                output_scale_factor=output_scale_factor,
-                pre_norm=resnet_pre_norm,
-            )
-        ]
-        attentions = []
-
-        for _ in range(num_layers):
-            if dual_cross_attention:
-                raise NotImplementedError
-            attentions.append(
-                Transformer3DModel(
-                    attn_num_head_channels,
-                    in_channels // attn_num_head_channels,
-                    in_channels=in_channels,
-                    num_layers=1,
-                    cross_attention_dim=cross_attention_dim,
-                    norm_num_groups=resnet_groups,
-                    use_linear_projection=use_linear_projection,
-                    upcast_attention=upcast_attention,
-                )
-            )
-            resnets.append(
-                ResnetBlock3D(
-                    in_channels=in_channels,
-                    out_channels=in_channels,
-                    temb_channels=temb_channels,
-                    eps=resnet_eps,
-                    groups=resnet_groups,
-                    dropout=dropout,
-                    time_embedding_norm=resnet_time_scale_shift,
-                    non_linearity=resnet_act_fn,
-                    output_scale_factor=output_scale_factor,
-                    pre_norm=resnet_pre_norm,
-                )
-            )
-
-        self.attentions = nn.ModuleList(attentions)
-        self.resnets = nn.ModuleList(resnets)
-
-    def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
-        hidden_states = self.resnets[0](hidden_states, temb)
-        for attn, resnet in zip(self.attentions, self.resnets[1:]):
-            hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
-            hidden_states = resnet(hidden_states, temb)
-
-        return hidden_states
-
-
-class CrossAttnDownBlock3D(nn.Module):
-    def __init__(
-        self,
-        in_channels: int,
-        out_channels: int,
-        temb_channels: int,
-        dropout: float = 0.0,
-        num_layers: int = 1,
-        resnet_eps: float = 1e-6,
-        resnet_time_scale_shift: str = "default",
-        resnet_act_fn: str = "swish",
-        resnet_groups: int = 32,
-        resnet_pre_norm: bool = True,
-        attn_num_head_channels=1,
-        cross_attention_dim=1280,
-        output_scale_factor=1.0,
-        downsample_padding=1,
-        add_downsample=True,
-        dual_cross_attention=False,
-        use_linear_projection=False,
-        only_cross_attention=False,
-        upcast_attention=False,
-    ):
-        super().__init__()
-        resnets = []
-        attentions = []
-
-        self.has_cross_attention = True
-        self.attn_num_head_channels = attn_num_head_channels
-
-        for i in range(num_layers):
-            in_channels = in_channels if i == 0 else out_channels
-            resnets.append(
-                ResnetBlock3D(
-                    in_channels=in_channels,
-                    out_channels=out_channels,
-                    temb_channels=temb_channels,
-                    eps=resnet_eps,
-                    groups=resnet_groups,
-                    dropout=dropout,
-                    time_embedding_norm=resnet_time_scale_shift,
-                    non_linearity=resnet_act_fn,
-                    output_scale_factor=output_scale_factor,
-                    pre_norm=resnet_pre_norm,
-                )
-            )
-            if dual_cross_attention:
-                raise NotImplementedError
-            attentions.append(
-                Transformer3DModel(
-                    attn_num_head_channels,
-                    out_channels // attn_num_head_channels,
-                    in_channels=out_channels,
-                    num_layers=1,
-                    cross_attention_dim=cross_attention_dim,
-                    norm_num_groups=resnet_groups,
-                    use_linear_projection=use_linear_projection,
-                    only_cross_attention=only_cross_attention,
-                    upcast_attention=upcast_attention,
-                )
-            )
-        self.attentions = nn.ModuleList(attentions)
-        self.resnets = nn.ModuleList(resnets)
-
-        if add_downsample:
-            self.downsamplers = nn.ModuleList(
-                [
-                    Downsample3D(
-                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
-                    )
-                ]
-            )
-        else:
-            self.downsamplers = None
-
-        self.gradient_checkpointing = False
-
-    def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
-        output_states = ()
-
-        for resnet, attn in zip(self.resnets, self.attentions):
-            if self.training and self.gradient_checkpointing:
-
-                def create_custom_forward(module, return_dict=None):
-                    def custom_forward(*inputs):
-                        if return_dict is not None:
-                            return module(*inputs, return_dict=return_dict)
-                        else:
-                            return module(*inputs)
-
-                    return custom_forward
-
-                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
-                hidden_states = torch.utils.checkpoint.checkpoint(
-                    create_custom_forward(attn, return_dict=False),
-                    hidden_states,
-                    encoder_hidden_states,
-                )[0]
-            else:
-                hidden_states = resnet(hidden_states, temb)
-                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
-
-            output_states += (hidden_states,)
-
-        if self.downsamplers is not None:
-            for downsampler in self.downsamplers:
-                hidden_states = downsampler(hidden_states)
-
-            output_states += (hidden_states,)
-
-        return hidden_states, output_states
-
-
-class DownBlock3D(nn.Module):
-    def __init__(
-        self,
-        in_channels: int,
-        out_channels: int,
-        temb_channels: int,
-        dropout: float = 0.0,
-        num_layers: int = 1,
-        resnet_eps: float = 1e-6,
-        resnet_time_scale_shift: str = "default",
-        resnet_act_fn: str = "swish",
-        resnet_groups: int = 32,
-        resnet_pre_norm: bool = True,
-        output_scale_factor=1.0,
-        add_downsample=True,
-        downsample_padding=1,
-    ):
-        super().__init__()
-        resnets = []
-
-        for i in range(num_layers):
-            in_channels = in_channels if i == 0 else out_channels
-            resnets.append(
-                ResnetBlock3D(
-                    in_channels=in_channels,
-                    out_channels=out_channels,
-                    temb_channels=temb_channels,
-                    eps=resnet_eps,
-                    groups=resnet_groups,
-                    dropout=dropout,
-                    time_embedding_norm=resnet_time_scale_shift,
-                    non_linearity=resnet_act_fn,
-                    output_scale_factor=output_scale_factor,
-                    pre_norm=resnet_pre_norm,
-                )
-            )
-
-        self.resnets = nn.ModuleList(resnets)
-
-        if add_downsample:
-            self.downsamplers = nn.ModuleList(
-                [
-                    Downsample3D(
-                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
-                    )
-                ]
-            )
-        else:
-            self.downsamplers = None
-
-        self.gradient_checkpointing = False
-
-    def forward(self, hidden_states, temb=None):
-        output_states = ()
-
-        for resnet in self.resnets:
-            if self.training and self.gradient_checkpointing:
-
-                def create_custom_forward(module):
-                    def custom_forward(*inputs):
-                        return module(*inputs)
-
-                    return custom_forward
-
-                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
-            else:
-                hidden_states = resnet(hidden_states, temb)
-
-            output_states += (hidden_states,)
-
-        if self.downsamplers is not None:
-            for downsampler in self.downsamplers:
-                hidden_states = downsampler(hidden_states)
-
-            output_states += (hidden_states,)
-
-        return hidden_states, output_states
-
-
-class CrossAttnUpBlock3D(nn.Module):
-    def __init__(
-        self,
-        in_channels: int,
-        out_channels: int,
-        prev_output_channel: int,
-        temb_channels: int,
-        dropout: float = 0.0,
-        num_layers: int = 1,
-        resnet_eps: float = 1e-6,
-        resnet_time_scale_shift: str = "default",
-        resnet_act_fn: str = "swish",
-        resnet_groups: int = 32,
-        resnet_pre_norm: bool = True,
-        attn_num_head_channels=1,
-        cross_attention_dim=1280,
-        output_scale_factor=1.0,
-        add_upsample=True,
-        dual_cross_attention=False,
-        use_linear_projection=False,
-        only_cross_attention=False,
-        upcast_attention=False,
-    ):
-        super().__init__()
-        resnets = []
-        attentions = []
-
-        self.has_cross_attention = True
-        self.attn_num_head_channels = attn_num_head_channels
-
-        for i in range(num_layers):
-            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
-            resnet_in_channels = prev_output_channel if i == 0 else out_channels
-
-            resnets.append(
-                ResnetBlock3D(
-                    in_channels=resnet_in_channels + res_skip_channels,
-                    out_channels=out_channels,
-                    temb_channels=temb_channels,
-                    eps=resnet_eps,
-                    groups=resnet_groups,
-                    dropout=dropout,
-                    time_embedding_norm=resnet_time_scale_shift,
-                    non_linearity=resnet_act_fn,
-                    output_scale_factor=output_scale_factor,
-                    pre_norm=resnet_pre_norm,
-                )
-            )
-            if dual_cross_attention:
-                raise NotImplementedError
-            attentions.append(
-                Transformer3DModel(
-                    attn_num_head_channels,
-                    out_channels // attn_num_head_channels,
-                    in_channels=out_channels,
-                    num_layers=1,
-                    cross_attention_dim=cross_attention_dim,
-                    norm_num_groups=resnet_groups,
-                    use_linear_projection=use_linear_projection,
-                    only_cross_attention=only_cross_attention,
-                    upcast_attention=upcast_attention,
-                )
-            )
-
-        self.attentions = nn.ModuleList(attentions)
-        self.resnets = nn.ModuleList(resnets)
-
-        if add_upsample:
-            self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])
-        else:
-            self.upsamplers = None
-
-        self.gradient_checkpointing = False
-
-    def forward(
-        self,
-        hidden_states,
-        res_hidden_states_tuple,
-        temb=None,
-        encoder_hidden_states=None,
-        upsample_size=None,
-        attention_mask=None,
-    ):
-        for resnet, attn in zip(self.resnets, self.attentions):
-            # pop res hidden states
-            res_hidden_states = res_hidden_states_tuple[-1]
-            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
-            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
-
-            if self.training and self.gradient_checkpointing:
-
-                def create_custom_forward(module, return_dict=None):
-                    def custom_forward(*inputs):
-                        if return_dict is not None:
-                            return module(*inputs, return_dict=return_dict)
-                        else:
return module(*inputs)
|
497 |
-
|
498 |
-
return custom_forward
|
499 |
-
|
500 |
-
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
|
501 |
-
hidden_states = torch.utils.checkpoint.checkpoint(
|
502 |
-
create_custom_forward(attn, return_dict=False),
|
503 |
-
hidden_states,
|
504 |
-
encoder_hidden_states,
|
505 |
-
)[0]
|
506 |
-
else:
|
507 |
-
hidden_states = resnet(hidden_states, temb)
|
508 |
-
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
|
509 |
-
|
510 |
-
if self.upsamplers is not None:
|
511 |
-
for upsampler in self.upsamplers:
|
512 |
-
hidden_states = upsampler(hidden_states, upsample_size)
|
513 |
-
|
514 |
-
return hidden_states
|
515 |
-
|
516 |
-
|
517 |
-
class UpBlock3D(nn.Module):
|
518 |
-
def __init__(
|
519 |
-
self,
|
520 |
-
in_channels: int,
|
521 |
-
prev_output_channel: int,
|
522 |
-
out_channels: int,
|
523 |
-
temb_channels: int,
|
524 |
-
dropout: float = 0.0,
|
525 |
-
num_layers: int = 1,
|
526 |
-
resnet_eps: float = 1e-6,
|
527 |
-
resnet_time_scale_shift: str = "default",
|
528 |
-
resnet_act_fn: str = "swish",
|
529 |
-
resnet_groups: int = 32,
|
530 |
-
resnet_pre_norm: bool = True,
|
531 |
-
output_scale_factor=1.0,
|
532 |
-
add_upsample=True,
|
533 |
-
):
|
534 |
-
super().__init__()
|
535 |
-
resnets = []
|
536 |
-
|
537 |
-
for i in range(num_layers):
|
538 |
-
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
|
539 |
-
resnet_in_channels = prev_output_channel if i == 0 else out_channels
|
540 |
-
|
541 |
-
resnets.append(
|
542 |
-
ResnetBlock3D(
|
543 |
-
in_channels=resnet_in_channels + res_skip_channels,
|
544 |
-
out_channels=out_channels,
|
545 |
-
temb_channels=temb_channels,
|
546 |
-
eps=resnet_eps,
|
547 |
-
groups=resnet_groups,
|
548 |
-
dropout=dropout,
|
549 |
-
time_embedding_norm=resnet_time_scale_shift,
|
550 |
-
non_linearity=resnet_act_fn,
|
551 |
-
output_scale_factor=output_scale_factor,
|
552 |
-
pre_norm=resnet_pre_norm,
|
553 |
-
)
|
554 |
-
)
|
555 |
-
|
556 |
-
self.resnets = nn.ModuleList(resnets)
|
557 |
-
|
558 |
-
if add_upsample:
|
559 |
-
self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])
|
560 |
-
else:
|
561 |
-
self.upsamplers = None
|
562 |
-
|
563 |
-
self.gradient_checkpointing = False
|
564 |
-
|
565 |
-
def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
|
566 |
-
for resnet in self.resnets:
|
567 |
-
# pop res hidden states
|
568 |
-
res_hidden_states = res_hidden_states_tuple[-1]
|
569 |
-
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
|
570 |
-
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
|
571 |
-
|
572 |
-
if self.training and self.gradient_checkpointing:
|
573 |
-
|
574 |
-
def create_custom_forward(module):
|
575 |
-
def custom_forward(*inputs):
|
576 |
-
return module(*inputs)
|
577 |
-
|
578 |
-
return custom_forward
|
579 |
-
|
580 |
-
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
|
581 |
-
else:
|
582 |
-
hidden_states = resnet(hidden_states, temb)
|
583 |
-
|
584 |
-
if self.upsamplers is not None:
|
585 |
-
for upsampler in self.upsamplers:
|
586 |
-
hidden_states = upsampler(hidden_states, upsample_size)
|
587 |
-
|
588 |
-
return hidden_states
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/models.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
"""Utilities for defining models
|
2 |
-
"""
|
3 |
-
|
4 |
-
import operator
|
5 |
-
from typing import Any, Callable, Type
|
6 |
-
|
7 |
-
|
8 |
-
class KeyBasedCompareMixin:
|
9 |
-
"""Provides comparison capabilities that is based on a key"""
|
10 |
-
|
11 |
-
__slots__ = ["_compare_key", "_defining_class"]
|
12 |
-
|
13 |
-
def __init__(self, key: Any, defining_class: Type["KeyBasedCompareMixin"]) -> None:
|
14 |
-
self._compare_key = key
|
15 |
-
self._defining_class = defining_class
|
16 |
-
|
17 |
-
def __hash__(self) -> int:
|
18 |
-
return hash(self._compare_key)
|
19 |
-
|
20 |
-
def __lt__(self, other: Any) -> bool:
|
21 |
-
return self._compare(other, operator.__lt__)
|
22 |
-
|
23 |
-
def __le__(self, other: Any) -> bool:
|
24 |
-
return self._compare(other, operator.__le__)
|
25 |
-
|
26 |
-
def __gt__(self, other: Any) -> bool:
|
27 |
-
return self._compare(other, operator.__gt__)
|
28 |
-
|
29 |
-
def __ge__(self, other: Any) -> bool:
|
30 |
-
return self._compare(other, operator.__ge__)
|
31 |
-
|
32 |
-
def __eq__(self, other: Any) -> bool:
|
33 |
-
return self._compare(other, operator.__eq__)
|
34 |
-
|
35 |
-
def _compare(self, other: Any, method: Callable[[Any, Any], bool]) -> bool:
|
36 |
-
if not isinstance(other, self._defining_class):
|
37 |
-
return NotImplemented
|
38 |
-
|
39 |
-
return method(self._compare_key, other._compare_key)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/before_sleep.py
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
# Copyright 2016 Julien Danjou
|
2 |
-
# Copyright 2016 Joshua Harlow
|
3 |
-
# Copyright 2013-2014 Ray Holder
|
4 |
-
#
|
5 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
# you may not use this file except in compliance with the License.
|
7 |
-
# You may obtain a copy of the License at
|
8 |
-
#
|
9 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
#
|
11 |
-
# Unless required by applicable law or agreed to in writing, software
|
12 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
# See the License for the specific language governing permissions and
|
15 |
-
# limitations under the License.
|
16 |
-
|
17 |
-
import typing
|
18 |
-
|
19 |
-
from pip._vendor.tenacity import _utils
|
20 |
-
|
21 |
-
if typing.TYPE_CHECKING:
|
22 |
-
import logging
|
23 |
-
|
24 |
-
from pip._vendor.tenacity import RetryCallState
|
25 |
-
|
26 |
-
|
27 |
-
def before_sleep_nothing(retry_state: "RetryCallState") -> None:
|
28 |
-
"""Before call strategy that does nothing."""
|
29 |
-
|
30 |
-
|
31 |
-
def before_sleep_log(
|
32 |
-
logger: "logging.Logger",
|
33 |
-
log_level: int,
|
34 |
-
exc_info: bool = False,
|
35 |
-
) -> typing.Callable[["RetryCallState"], None]:
|
36 |
-
"""Before call strategy that logs to some logger the attempt."""
|
37 |
-
|
38 |
-
def log_it(retry_state: "RetryCallState") -> None:
|
39 |
-
local_exc_info: BaseException | bool | None
|
40 |
-
|
41 |
-
if retry_state.outcome is None:
|
42 |
-
raise RuntimeError("log_it() called before outcome was set")
|
43 |
-
|
44 |
-
if retry_state.next_action is None:
|
45 |
-
raise RuntimeError("log_it() called before next_action was set")
|
46 |
-
|
47 |
-
if retry_state.outcome.failed:
|
48 |
-
ex = retry_state.outcome.exception()
|
49 |
-
verb, value = "raised", f"{ex.__class__.__name__}: {ex}"
|
50 |
-
|
51 |
-
if exc_info:
|
52 |
-
local_exc_info = retry_state.outcome.exception()
|
53 |
-
else:
|
54 |
-
local_exc_info = False
|
55 |
-
else:
|
56 |
-
verb, value = "returned", retry_state.outcome.result()
|
57 |
-
local_exc_info = False # exc_info does not apply when no exception
|
58 |
-
|
59 |
-
if retry_state.fn is None:
|
60 |
-
# NOTE(sileht): can't really happen, but we must please mypy
|
61 |
-
fn_name = "<unknown>"
|
62 |
-
else:
|
63 |
-
fn_name = _utils.get_callback_name(retry_state.fn)
|
64 |
-
|
65 |
-
logger.log(
|
66 |
-
log_level,
|
67 |
-
f"Retrying {fn_name} " f"in {retry_state.next_action.sleep} seconds as it {verb} {value}.",
|
68 |
-
exc_info=local_exc_info,
|
69 |
-
)
|
70 |
-
|
71 |
-
return log_it
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Autopixel/blurry-faces/README.md
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Blurry Faces
|
3 |
-
emoji: 🙈
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
app_file: app.py
|
8 |
-
pinned: false
|
9 |
-
license: apache-2.0
|
10 |
-
duplicated_from: frapochetti/blurry-faces
|
11 |
-
---
|
12 |
-
|
13 |
-
# Configuration
|
14 |
-
|
15 |
-
`title`: _string_
|
16 |
-
Display title for the Space
|
17 |
-
|
18 |
-
`emoji`: _string_
|
19 |
-
Space emoji (emoji-only character allowed)
|
20 |
-
|
21 |
-
`colorFrom`: _string_
|
22 |
-
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
23 |
-
|
24 |
-
`colorTo`: _string_
|
25 |
-
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
26 |
-
|
27 |
-
`sdk`: _string_
|
28 |
-
Can be either `gradio`, `streamlit`, or `static`
|
29 |
-
|
30 |
-
`sdk_version` : _string_
|
31 |
-
Only applicable for `streamlit` SDK.
|
32 |
-
See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
|
33 |
-
|
34 |
-
`app_file`: _string_
|
35 |
-
Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
|
36 |
-
Path is relative to the root of the repository.
|
37 |
-
|
38 |
-
`models`: _List[string]_
|
39 |
-
HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
|
40 |
-
Will be parsed automatically from your code if not specified here.
|
41 |
-
|
42 |
-
`datasets`: _List[string]_
|
43 |
-
HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
|
44 |
-
Will be parsed automatically from your code if not specified here.
|
45 |
-
|
46 |
-
`pinned`: _boolean_
|
47 |
-
Whether the Space stays on top of your list.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awesimo/jojogan/e4e/editings/sefa.py
DELETED
@@ -1,46 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import numpy as np
|
3 |
-
from tqdm import tqdm
|
4 |
-
|
5 |
-
|
6 |
-
def edit(generator, latents, indices, semantics=1, start_distance=-15.0, end_distance=15.0, num_samples=1, step=11):
|
7 |
-
|
8 |
-
layers, boundaries, values = factorize_weight(generator, indices)
|
9 |
-
codes = latents.detach().cpu().numpy() # (1,18,512)
|
10 |
-
|
11 |
-
# Generate visualization pages.
|
12 |
-
distances = np.linspace(start_distance, end_distance, step)
|
13 |
-
num_sam = num_samples
|
14 |
-
num_sem = semantics
|
15 |
-
|
16 |
-
edited_latents = []
|
17 |
-
for sem_id in tqdm(range(num_sem), desc='Semantic ', leave=False):
|
18 |
-
boundary = boundaries[sem_id:sem_id + 1]
|
19 |
-
for sam_id in tqdm(range(num_sam), desc='Sample ', leave=False):
|
20 |
-
code = codes[sam_id:sam_id + 1]
|
21 |
-
for col_id, d in enumerate(distances, start=1):
|
22 |
-
temp_code = code.copy()
|
23 |
-
temp_code[:, layers, :] += boundary * d
|
24 |
-
edited_latents.append(torch.from_numpy(temp_code).float().cuda())
|
25 |
-
return torch.cat(edited_latents)
|
26 |
-
|
27 |
-
|
28 |
-
def factorize_weight(g_ema, layers='all'):
|
29 |
-
|
30 |
-
weights = []
|
31 |
-
if layers == 'all' or 0 in layers:
|
32 |
-
weight = g_ema.conv1.conv.modulation.weight.T
|
33 |
-
weights.append(weight.cpu().detach().numpy())
|
34 |
-
|
35 |
-
if layers == 'all':
|
36 |
-
layers = list(range(g_ema.num_layers - 1))
|
37 |
-
else:
|
38 |
-
layers = [l - 1 for l in layers if l != 0]
|
39 |
-
|
40 |
-
for idx in layers:
|
41 |
-
weight = g_ema.convs[idx].conv.modulation.weight.T
|
42 |
-
weights.append(weight.cpu().detach().numpy())
|
43 |
-
weight = np.concatenate(weights, axis=1).astype(np.float32)
|
44 |
-
weight = weight / np.linalg.norm(weight, axis=0, keepdims=True)
|
45 |
-
eigen_values, eigen_vectors = np.linalg.eig(weight.dot(weight.T))
|
46 |
-
return layers, eigen_vectors.T, eigen_values
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BAAI/AltDiffusion/style.css
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
.gradio-container {
|
2 |
-
font-family: 'IBM Plex Sans', sans-serif;
|
3 |
-
}
|
4 |
-
.gr-button {
|
5 |
-
color: white;
|
6 |
-
/* border-color: black; */
|
7 |
-
/* background: black; */
|
8 |
-
background: rgb(60, 145, 238);
|
9 |
-
}
|
10 |
-
/* input[type='range'] {
|
11 |
-
accent-color: rgb(60, 145, 238);
|
12 |
-
}
|
13 |
-
.dark input[type='range'] {
|
14 |
-
accent-color: #dfdfdf;
|
15 |
-
} */
|
16 |
-
.container {
|
17 |
-
max-width: 900px;
|
18 |
-
margin: auto;
|
19 |
-
padding-top: 1.5rem;
|
20 |
-
}
|
21 |
-
#gallery {
|
22 |
-
min-height: 22rem;
|
23 |
-
margin-bottom: 15px;
|
24 |
-
margin-left: auto;
|
25 |
-
margin-right: auto;
|
26 |
-
border-bottom-right-radius: .5rem !important;
|
27 |
-
border-bottom-left-radius: .5rem !important;
|
28 |
-
}
|
29 |
-
#gallery>div>.h-full {
|
30 |
-
min-height: 20rem;
|
31 |
-
}
|
32 |
-
.details:hover {
|
33 |
-
text-decoration: underline;
|
34 |
-
}
|
35 |
-
.gr-button {
|
36 |
-
white-space: nowrap;
|
37 |
-
}
|
38 |
-
/* .gr-button:focus {
|
39 |
-
border-color: rgb(147 197 253 / var(--tw-border-opacity));
|
40 |
-
outline: none;
|
41 |
-
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
|
42 |
-
--tw-border-opacity: 1;
|
43 |
-
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
|
44 |
-
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
|
45 |
-
--tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
|
46 |
-
--tw-ring-opacity: .5;
|
47 |
-
} */
|
48 |
-
.footer {
|
49 |
-
margin-bottom: 45px;
|
50 |
-
margin-top: 20px;
|
51 |
-
/* text-align: center; */
|
52 |
-
border-bottom: 1px solid #e5e5e5;
|
53 |
-
}
|
54 |
-
.footer>p {
|
55 |
-
font-size: .8rem;
|
56 |
-
display: inline-block;
|
57 |
-
padding: 0 10px;
|
58 |
-
transform: translateY(10px);
|
59 |
-
background: white;
|
60 |
-
}
|
61 |
-
.footer>p>h4 {
|
62 |
-
font-size: .20rem;
|
63 |
-
display: inline-block;
|
64 |
-
padding: 0 10px;
|
65 |
-
transform: translateY(10px);
|
66 |
-
background: white;
|
67 |
-
font-weight: bold;
|
68 |
-
}
|
69 |
-
.dark .footer {
|
70 |
-
/* border-color: #303030; */
|
71 |
-
border-color: rgb(60, 145, 238);
|
72 |
-
}
|
73 |
-
.dark .footer>p {
|
74 |
-
/* background: #0b0f19; */
|
75 |
-
background: rgb(60, 145, 238);
|
76 |
-
}
|
77 |
-
.prompt h4{
|
78 |
-
margin: 1.25em 0 .25em 0;
|
79 |
-
font-weight: bold;
|
80 |
-
font-size: 115%;
|
81 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/julius/resample.py
DELETED
@@ -1,216 +0,0 @@
|
|
1 |
-
# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
|
2 |
-
# Author: adefossez, 2020
|
3 |
-
"""
|
4 |
-
Differentiable, Pytorch based resampling.
|
5 |
-
Implementation of Julius O. Smith algorithm for resampling.
|
6 |
-
See https://ccrma.stanford.edu/~jos/resample/ for details.
|
7 |
-
This implementation is specially optimized for when new_sr / old_sr is a fraction
|
8 |
-
with a small numerator and denominator when removing the gcd (e.g. new_sr = 700, old_sr = 500).
|
9 |
-
|
10 |
-
Very similar to [bmcfee/resampy](https://github.com/bmcfee/resampy) except this implementation
|
11 |
-
is optimized for the case mentioned before, while resampy is slower but more general.
|
12 |
-
|
13 |
-
"""
|
14 |
-
|
15 |
-
import math
|
16 |
-
from typing import Optional
|
17 |
-
|
18 |
-
import torch
|
19 |
-
from torch.nn import functional as F
|
20 |
-
|
21 |
-
from .core import sinc
|
22 |
-
from .utils import simple_repr
|
23 |
-
|
24 |
-
|
25 |
-
class ResampleFrac(torch.nn.Module):
|
26 |
-
"""
|
27 |
-
Resampling from the sample rate `old_sr` to `new_sr`.
|
28 |
-
"""
|
29 |
-
def __init__(self, old_sr: int, new_sr: int, zeros: int = 24, rolloff: float = 0.945):
|
30 |
-
"""
|
31 |
-
Args:
|
32 |
-
old_sr (int): sample rate of the input signal x.
|
33 |
-
new_sr (int): sample rate of the output.
|
34 |
-
zeros (int): number of zero crossing to keep in the sinc filter.
|
35 |
-
rolloff (float): use a lowpass filter that is `rolloff * new_sr / 2`,
|
36 |
-
to ensure sufficient margin due to the imperfection of the FIR filter used.
|
37 |
-
Lowering this value will reduce anti-aliasing, but will reduce some of the
|
38 |
-
highest frequencies.
|
39 |
-
|
40 |
-
Shape:
|
41 |
-
|
42 |
-
- Input: `[*, T]`
|
43 |
-
- Output: `[*, T']` with `T' = int(new_sr * T / old_sr)
|
44 |
-
|
45 |
-
|
46 |
-
.. caution::
|
47 |
-
After dividing `old_sr` and `new_sr` by their GCD, both should be small
|
48 |
-
for this implementation to be fast.
|
49 |
-
|
50 |
-
>>> import torch
|
51 |
-
>>> resample = ResampleFrac(4, 5)
|
52 |
-
>>> x = torch.randn(1000)
|
53 |
-
>>> print(len(resample(x)))
|
54 |
-
1250
|
55 |
-
"""
|
56 |
-
super().__init__()
|
57 |
-
if not isinstance(old_sr, int) or not isinstance(new_sr, int):
|
58 |
-
raise ValueError("old_sr and new_sr should be integers")
|
59 |
-
gcd = math.gcd(old_sr, new_sr)
|
60 |
-
self.old_sr = old_sr // gcd
|
61 |
-
self.new_sr = new_sr // gcd
|
62 |
-
self.zeros = zeros
|
63 |
-
self.rolloff = rolloff
|
64 |
-
|
65 |
-
self._init_kernels()
|
66 |
-
|
67 |
-
def _init_kernels(self):
|
68 |
-
if self.old_sr == self.new_sr:
|
69 |
-
return
|
70 |
-
|
71 |
-
kernels = []
|
72 |
-
sr = min(self.new_sr, self.old_sr)
|
73 |
-
# rolloff will perform antialiasing filtering by removing the highest frequencies.
|
74 |
-
# At first I thought I only needed this when downsampling, but when upsampling
|
75 |
-
# you will get edge artifacts without this, the edge is equivalent to zero padding,
|
76 |
-
# which will add high freq artifacts.
|
77 |
-
sr *= self.rolloff
|
78 |
-
|
79 |
-
# The key idea of the algorithm is that x(t) can be exactly reconstructed from x[i] (tensor)
|
80 |
-
# using the sinc interpolation formula:
|
81 |
-
# x(t) = sum_i x[i] sinc(pi * old_sr * (i / old_sr - t))
|
82 |
-
# We can then sample the function x(t) with a different sample rate:
|
83 |
-
# y[j] = x(j / new_sr)
|
84 |
-
# or,
|
85 |
-
# y[j] = sum_i x[i] sinc(pi * old_sr * (i / old_sr - j / new_sr))
|
86 |
-
|
87 |
-
# We see here that y[j] is the convolution of x[i] with a specific filter, for which
|
88 |
-
# we take an FIR approximation, stopping when we see at least `zeros` zeros crossing.
|
89 |
-
# But y[j+1] is going to have a different set of weights and so on, until y[j + new_sr].
|
90 |
-
# Indeed:
|
91 |
-
# y[j + new_sr] = sum_i x[i] sinc(pi * old_sr * ((i / old_sr - (j + new_sr) / new_sr))
|
92 |
-
# = sum_i x[i] sinc(pi * old_sr * ((i - old_sr) / old_sr - j / new_sr))
|
93 |
-
# = sum_i x[i + old_sr] sinc(pi * old_sr * (i / old_sr - j / new_sr))
|
94 |
-
# so y[j+new_sr] uses the same filter as y[j], but on a shifted version of x by `old_sr`.
|
95 |
-
# This will explain the F.conv1d after, with a stride of old_sr.
|
96 |
-
self._width = math.ceil(self.zeros * self.old_sr / sr)
|
97 |
-
# If old_sr is still big after GCD reduction, most filters will be very unbalanced, i.e.,
|
98 |
-
# they will have a lot of almost zero values to the left or to the right...
|
99 |
-
# There is probably a way to evaluate those filters more efficiently, but this is kept for
|
100 |
-
# future work.
|
101 |
-
idx = torch.arange(-self._width, self._width + self.old_sr).float()
|
102 |
-
for i in range(self.new_sr):
|
103 |
-
t = (-i/self.new_sr + idx/self.old_sr) * sr
|
104 |
-
t = t.clamp_(-self.zeros, self.zeros)
|
105 |
-
t *= math.pi
|
106 |
-
window = torch.cos(t/self.zeros/2)**2
|
107 |
-
kernel = sinc(t) * window
|
108 |
-
# Renormalize kernel to ensure a constant signal is preserved.
|
109 |
-
kernel.div_(kernel.sum())
|
110 |
-
kernels.append(kernel)
|
111 |
-
|
112 |
-
self.register_buffer("kernel", torch.stack(kernels).view(self.new_sr, 1, -1))
|
113 |
-
|
114 |
-
def forward(self, x: torch.Tensor, output_length: Optional[int] = None, full: bool = False):
|
115 |
-
"""
|
116 |
-
Resample x.
|
117 |
-
Args:
|
118 |
-
x (Tensor): signal to resample, time should be the last dimension
|
119 |
-
output_length (None or int): This can be set to the desired output length
|
120 |
-
(last dimension). Allowed values are between 0 and
|
121 |
-
ceil(length * new_sr / old_sr). When None (default) is specified, the
|
122 |
-
floored output length will be used. In order to select the largest possible
|
123 |
-
size, use the `full` argument.
|
124 |
-
full (bool): return the longest possible output from the input. This can be useful
|
125 |
-
if you chain resampling operations, and want to give the `output_length` only
|
126 |
-
for the last one, while passing `full=True` to all the other ones.
|
127 |
-
"""
|
128 |
-
if self.old_sr == self.new_sr:
|
129 |
-
return x
|
130 |
-
shape = x.shape
|
131 |
-
length = x.shape[-1]
|
132 |
-
x = x.reshape(-1, length)
|
133 |
-
x = F.pad(x[:, None], (self._width, self._width + self.old_sr), mode='replicate')
|
134 |
-
ys = F.conv1d(x, self.kernel, stride=self.old_sr) # type: ignore
|
135 |
-
y = ys.transpose(1, 2).reshape(list(shape[:-1]) + [-1])
|
136 |
-
|
137 |
-
float_output_length = self.new_sr * length / self.old_sr
|
138 |
-
max_output_length = int(math.ceil(float_output_length))
|
139 |
-
default_output_length = int(float_output_length)
|
140 |
-
if output_length is None:
|
141 |
-
output_length = max_output_length if full else default_output_length
|
142 |
-
elif output_length < 0 or output_length > max_output_length:
|
143 |
-
raise ValueError(f"output_length must be between 0 and {max_output_length}")
|
144 |
-
else:
|
145 |
-
if full:
|
146 |
-
raise ValueError("You cannot pass both full=True and output_length")
|
147 |
-
return y[..., :output_length]
|
148 |
-
|
149 |
-
def __repr__(self):
|
150 |
-
return simple_repr(self)
|
151 |
-
|
152 |
-
|
153 |
-
def resample_frac(x: torch.Tensor, old_sr: int, new_sr: int,
|
154 |
-
zeros: int = 24, rolloff: float = 0.945,
|
155 |
-
output_length: Optional[int] = None, full: bool = False):
|
156 |
-
"""
|
157 |
-
Functional version of `ResampleFrac`, refer to its documentation for more information.
|
158 |
-
|
159 |
-
..warning::
|
160 |
-
If you call repeatidly this functions with the same sample rates, then the
|
161 |
-
resampling kernel will be recomputed everytime. For best performance, you should use
|
162 |
-
and cache an instance of `ResampleFrac`.
|
163 |
-
"""
|
164 |
-
return ResampleFrac(old_sr, new_sr, zeros, rolloff).to(x)(x, output_length, full)
|
165 |
-
|
166 |
-
|
167 |
-
# Easier implementations for downsampling and upsampling by a factor of 2
|
168 |
-
# Kept for testing and reference
|
169 |
-
|
170 |
-
def _kernel_upsample2_downsample2(zeros):
|
171 |
-
# Kernel for upsampling and downsampling by a factor of 2. Interestingly,
|
172 |
-
# it is the same kernel used for both.
|
173 |
-
win = torch.hann_window(4 * zeros + 1, periodic=False)
|
174 |
-
winodd = win[1::2]
|
175 |
-
t = torch.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
|
176 |
-
t *= math.pi
|
177 |
-
kernel = (sinc(t) * winodd).view(1, 1, -1)
|
178 |
-
return kernel
|
179 |
-
|
180 |
-
|
181 |
-
def _upsample2(x, zeros=24):
|
182 |
-
"""
|
183 |
-
Upsample x by a factor of two. The output will be exactly twice as long as the input.
|
184 |
-
Args:
|
185 |
-
x (Tensor): signal to upsample, time should be the last dimension
|
186 |
-
zeros (int): number of zero crossing to keep in the sinc filter.
|
187 |
-
|
188 |
-
This function is kept only for reference, you should use the more generic `resample_frac`
|
189 |
-
one. This function does not perform anti-aliasing filtering.
|
190 |
-
"""
|
191 |
-
*other, time = x.shape
|
192 |
-
kernel = _kernel_upsample2_downsample2(zeros).to(x)
|
193 |
-
out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view(*other, time)
|
194 |
-
y = torch.stack([x, out], dim=-1)
|
195 |
-
return y.view(*other, -1)
|
196 |
-
|
197 |
-
|
198 |
-
def _downsample2(x, zeros=24):
|
199 |
-
"""
|
200 |
-
Downsample x by a factor of two. The output length is half of the input, ceiled.
|
201 |
-
Args:
|
202 |
-
x (Tensor): signal to downsample, time should be the last dimension
|
203 |
-
zeros (int): number of zero crossing to keep in the sinc filter.
|
204 |
-
|
205 |
-
This function is kept only for reference, you should use the more generic `resample_frac`
|
206 |
-
one. This function does not perform anti-aliasing filtering.
|
207 |
-
"""
|
208 |
-
if x.shape[-1] % 2 != 0:
|
209 |
-
x = F.pad(x, (0, 1))
|
210 |
-
xeven = x[..., ::2]
|
211 |
-
xodd = x[..., 1::2]
|
212 |
-
*other, time = xodd.shape
|
213 |
-
kernel = _kernel_upsample2_downsample2(zeros).to(x)
|
214 |
-
out = xeven + F.conv1d(xodd.view(-1, 1, time), kernel, padding=zeros)[..., :-1].view(
|
215 |
-
*other, time)
|
216 |
-
return out.view(*other, -1).mul(0.5)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/__main__.py
DELETED
@@ -1,274 +0,0 @@
|
|
1 |
-
import colorsys
|
2 |
-
import io
|
3 |
-
from time import process_time
|
4 |
-
|
5 |
-
from pip._vendor.rich import box
|
6 |
-
from pip._vendor.rich.color import Color
|
7 |
-
from pip._vendor.rich.console import Console, ConsoleOptions, Group, RenderableType, RenderResult
|
8 |
-
from pip._vendor.rich.markdown import Markdown
|
9 |
-
from pip._vendor.rich.measure import Measurement
|
10 |
-
from pip._vendor.rich.pretty import Pretty
|
11 |
-
from pip._vendor.rich.segment import Segment
|
12 |
-
from pip._vendor.rich.style import Style
|
13 |
-
from pip._vendor.rich.syntax import Syntax
|
14 |
-
from pip._vendor.rich.table import Table
|
15 |
-
from pip._vendor.rich.text import Text
|
16 |
-
|
17 |
-
|
18 |
-
class ColorBox:
|
19 |
-
def __rich_console__(
|
20 |
-
self, console: Console, options: ConsoleOptions
|
21 |
-
) -> RenderResult:
|
22 |
-
for y in range(0, 5):
|
23 |
-
for x in range(options.max_width):
|
24 |
-
h = x / options.max_width
|
25 |
-
l = 0.1 + ((y / 5) * 0.7)
|
26 |
-
r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
|
27 |
-
r2, g2, b2 = colorsys.hls_to_rgb(h, l + 0.7 / 10, 1.0)
|
28 |
-
bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
|
29 |
-
color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
|
30 |
-
yield Segment("▄", Style(color=color, bgcolor=bgcolor))
|
31 |
-
yield Segment.line()
|
32 |
-
|
33 |
-
def __rich_measure__(
|
34 |
-
self, console: "Console", options: ConsoleOptions
|
35 |
-
) -> Measurement:
|
36 |
-
return Measurement(1, options.max_width)
|
37 |
-
|
38 |
-
|
39 |
-
def make_test_card() -> Table:
|
40 |
-
"""Get a renderable that demonstrates a number of features."""
|
41 |
-
table = Table.grid(padding=1, pad_edge=True)
|
42 |
-
table.title = "Rich features"
|
43 |
-
table.add_column("Feature", no_wrap=True, justify="center", style="bold red")
|
44 |
-
table.add_column("Demonstration")
|
45 |
-
|
46 |
-
color_table = Table(
|
47 |
-
box=None,
|
48 |
-
expand=False,
|
49 |
-
show_header=False,
|
50 |
-
show_edge=False,
|
51 |
-
pad_edge=False,
|
52 |
-
)
|
53 |
-
color_table.add_row(
|
54 |
-
(
|
55 |
-
"✓ [bold green]4-bit color[/]\n"
|
56 |
-
"✓ [bold blue]8-bit color[/]\n"
|
57 |
-
"✓ [bold magenta]Truecolor (16.7 million)[/]\n"
|
58 |
-
"✓ [bold yellow]Dumb terminals[/]\n"
|
59 |
-
"✓ [bold cyan]Automatic color conversion"
|
60 |
-
),
|
61 |
-
ColorBox(),
|
62 |
-
)
|
63 |
-
|
64 |
-
table.add_row("Colors", color_table)
|
65 |
-
|
66 |
-
table.add_row(
|
67 |
-
"Styles",
|
68 |
-
"All ansi styles: [bold]bold[/], [dim]dim[/], [italic]italic[/italic], [underline]underline[/], [strike]strikethrough[/], [reverse]reverse[/], and even [blink]blink[/].",
|
69 |
-
)
|
70 |
-
|
71 |
-
lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque in metus sed sapien ultricies pretium a at justo. Maecenas luctus velit et auctor maximus."
|
72 |
-
lorem_table = Table.grid(padding=1, collapse_padding=True)
|
73 |
-
lorem_table.pad_edge = False
|
74 |
-
lorem_table.add_row(
|
75 |
-
Text(lorem, justify="left", style="green"),
|
76 |
-
Text(lorem, justify="center", style="yellow"),
|
77 |
-
Text(lorem, justify="right", style="blue"),
|
78 |
-
Text(lorem, justify="full", style="red"),
|
79 |
-
)
|
80 |
-
table.add_row(
|
81 |
-
"Text",
|
82 |
-
Group(
|
83 |
-
Text.from_markup(
|
84 |
-
"""Word wrap text. Justify [green]left[/], [yellow]center[/], [blue]right[/] or [red]full[/].\n"""
|
85 |
-
),
|
86 |
-
lorem_table,
|
87 |
-
),
|
88 |
-
)
|
89 |
-
|
90 |
-
def comparison(renderable1: RenderableType, renderable2: RenderableType) -> Table:
|
91 |
-
table = Table(show_header=False, pad_edge=False, box=None, expand=True)
|
92 |
-
table.add_column("1", ratio=1)
|
93 |
-
table.add_column("2", ratio=1)
|
94 |
-
table.add_row(renderable1, renderable2)
|
95 |
-
return table
|
96 |
-
|
97 |
-
table.add_row(
|
98 |
-
"Asian\nlanguage\nsupport",
|
99 |
-
":flag_for_china: 该库支持中文,日文和韩文文本!\n:flag_for_japan: ライブラリは中国語、日本語、韓国語のテキストをサポートしています\n:flag_for_south_korea: 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다",
|
100 |
-
)
|
101 |
-
|
102 |
-
markup_example = (
|
103 |
-
"[bold magenta]Rich[/] supports a simple [i]bbcode[/i]-like [b]markup[/b] for [yellow]color[/], [underline]style[/], and emoji! "
|
104 |
-
":+1: :apple: :ant: :bear: :baguette_bread: :bus: "
|
105 |
-
)
|
106 |
-
table.add_row("Markup", markup_example)
|
107 |
-
|
108 |
-
example_table = Table(
|
109 |
-
show_edge=False,
|
110 |
-
show_header=True,
|
111 |
-
expand=False,
|
112 |
-
row_styles=["none", "dim"],
|
113 |
-
box=box.SIMPLE,
|
114 |
-
)
|
115 |
-
example_table.add_column("[green]Date", style="green", no_wrap=True)
|
116 |
-
example_table.add_column("[blue]Title", style="blue")
|
117 |
-
example_table.add_column(
|
118 |
-
"[cyan]Production Budget",
|
119 |
-
style="cyan",
|
120 |
-
justify="right",
|
121 |
-
no_wrap=True,
|
122 |
-
)
|
123 |
-
example_table.add_column(
|
124 |
-
"[magenta]Box Office",
|
125 |
-
style="magenta",
|
126 |
-
justify="right",
|
127 |
-
no_wrap=True,
|
128 |
-
)
|
129 |
-
example_table.add_row(
|
130 |
-
"Dec 20, 2019",
|
131 |
-
"Star Wars: The Rise of Skywalker",
|
132 |
-
"$275,000,000",
|
133 |
-
"$375,126,118",
|
134 |
-
)
|
135 |
-
example_table.add_row(
|
136 |
-
"May 25, 2018",
|
137 |
-
"[b]Solo[/]: A Star Wars Story",
|
138 |
-
"$275,000,000",
|
139 |
-
"$393,151,347",
|
140 |
-
)
|
141 |
-
example_table.add_row(
|
142 |
-
"Dec 15, 2017",
|
143 |
-
"Star Wars Ep. VIII: The Last Jedi",
|
144 |
-
"$262,000,000",
|
145 |
-
"[bold]$1,332,539,889[/bold]",
|
146 |
-
)
|
147 |
-
example_table.add_row(
|
148 |
-
"May 19, 1999",
|
149 |
-
"Star Wars Ep. [b]I[/b]: [i]The phantom Menace",
|
150 |
-
"$115,000,000",
|
151 |
-
"$1,027,044,677",
|
152 |
-
)
|
153 |
-
|
154 |
-
table.add_row("Tables", example_table)
|
155 |
-
|
156 |
-
code = '''\
|
157 |
-
def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
|
158 |
-
"""Iterate and generate a tuple with a flag for last value."""
|
159 |
-
iter_values = iter(values)
|
160 |
-
try:
|
161 |
-
previous_value = next(iter_values)
|
162 |
-
except StopIteration:
|
163 |
-
return
|
164 |
-
for value in iter_values:
|
165 |
-
yield False, previous_value
|
166 |
-
previous_value = value
|
167 |
-
yield True, previous_value'''
|
168 |
-
|
169 |
-
pretty_data = {
|
170 |
-
"foo": [
|
171 |
-
3.1427,
|
172 |
-
(
|
173 |
-
"Paul Atreides",
|
174 |
-
"Vladimir Harkonnen",
|
175 |
-
"Thufir Hawat",
|
176 |
-
),
|
177 |
-
],
|
178 |
-
"atomic": (False, True, None),
|
179 |
-
}
|
180 |
-
table.add_row(
|
181 |
-
"Syntax\nhighlighting\n&\npretty\nprinting",
|
182 |
-
comparison(
|
183 |
-
Syntax(code, "python3", line_numbers=True, indent_guides=True),
|
184 |
-
Pretty(pretty_data, indent_guides=True),
|
185 |
-
),
|
186 |
-
)
|
187 |
-
|
188 |
-
markdown_example = """\
|
189 |
-
# Markdown
|
190 |
-
|
191 |
-
Supports much of the *markdown* __syntax__!
|
192 |
-
|
193 |
-
- Headers
|
194 |
-
- Basic formatting: **bold**, *italic*, `code`
|
195 |
-
- Block quotes
|
196 |
-
- Lists, and more...
|
197 |
-
"""
|
198 |
-
table.add_row(
|
199 |
-
"Markdown", comparison("[cyan]" + markdown_example, Markdown(markdown_example))
|
200 |
-
)
|
201 |
-
|
202 |
-
table.add_row(
|
203 |
-
"+more!",
|
204 |
-
"""Progress bars, columns, styled logging handler, tracebacks, etc...""",
|
205 |
-
)
|
206 |
-
return table
|
207 |
-
|
208 |
-
|
209 |
-
if __name__ == "__main__": # pragma: no cover
|
210 |
-
|
211 |
-
console = Console(
|
212 |
-
file=io.StringIO(),
|
213 |
-
force_terminal=True,
|
214 |
-
)
|
215 |
-
test_card = make_test_card()
|
216 |
-
|
217 |
-
# Print once to warm cache
|
218 |
-
start = process_time()
|
219 |
-
console.print(test_card)
|
220 |
-
pre_cache_taken = round((process_time() - start) * 1000.0, 1)
|
221 |
-
|
222 |
-
console.file = io.StringIO()
|
223 |
-
|
224 |
-
start = process_time()
|
225 |
-
console.print(test_card)
|
226 |
-
taken = round((process_time() - start) * 1000.0, 1)
|
227 |
-
|
228 |
-
c = Console(record=True)
|
229 |
-
c.print(test_card)
|
230 |
-
|
231 |
-
print(f"rendered in {pre_cache_taken}ms (cold cache)")
|
232 |
-
print(f"rendered in {taken}ms (warm cache)")
|
233 |
-
|
234 |
-
from pip._vendor.rich.panel import Panel
|
235 |
-
|
236 |
-
console = Console()
|
237 |
-
|
238 |
-
sponsor_message = Table.grid(padding=1)
|
239 |
-
sponsor_message.add_column(style="green", justify="right")
|
240 |
-
sponsor_message.add_column(no_wrap=True)
|
241 |
-
|
242 |
-
sponsor_message.add_row(
|
243 |
-
"Textualize",
|
244 |
-
"[u blue link=https://github.com/textualize]https://github.com/textualize",
|
245 |
-
)
|
246 |
-
sponsor_message.add_row(
|
247 |
-
"Twitter",
|
248 |
-
"[u blue link=https://twitter.com/willmcgugan]https://twitter.com/willmcgugan",
|
249 |
-
)
|
250 |
-
|
251 |
-
intro_message = Text.from_markup(
|
252 |
-
"""\
|
253 |
-
We hope you enjoy using Rich!
|
254 |
-
|
255 |
-
Rich is maintained with [red]:heart:[/] by [link=https://www.textualize.io]Textualize.io[/]
|
256 |
-
|
257 |
-
- Will McGugan"""
|
258 |
-
)
|
259 |
-
|
260 |
-
message = Table.grid(padding=2)
|
261 |
-
message.add_column()
|
262 |
-
message.add_column(no_wrap=True)
|
263 |
-
message.add_row(intro_message, sponsor_message)
|
264 |
-
|
265 |
-
console.print(
|
266 |
-
Panel.fit(
|
267 |
-
message,
|
268 |
-
box=box.ROUNDED,
|
269 |
-
padding=(1, 2),
|
270 |
-
title="[b red]Thanks for trying out Rich!",
|
271 |
-
border_style="bright_blue",
|
272 |
-
),
|
273 |
-
justify="center",
|
274 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/model_download/yolov5_model_p5_all.sh
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
cd ./yolov5
|
2 |
-
|
3 |
-
# 下载YOLOv5模型
|
4 |
-
wget -c -t 0 https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n.pt
|
5 |
-
wget -c -t 0 https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt
|
6 |
-
wget -c -t 0 https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m.pt
|
7 |
-
wget -c -t 0 https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l.pt
|
8 |
-
wget -c -t 0 https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x.pt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/merge.h
DELETED
@@ -1,1018 +0,0 @@
|
|
1 |
-
/******************************************************************************
|
2 |
-
j * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
3 |
-
*
|
4 |
-
* Redistribution and use in source and binary forms, with or without
|
5 |
-
* modification, are permitted provided that the following conditions are met:
|
6 |
-
* * Redistributions of source code must retain the above copyright
|
7 |
-
* notice, this list of conditions and the following disclaimer.
|
8 |
-
* * Redistributions in binary form must reproduce the above copyright
|
9 |
-
* notice, this list of conditions and the following disclaimer in the
|
10 |
-
* documentation and/or other materials provided with the distribution.
|
11 |
-
* * Neither the name of the NVIDIA CORPORATION nor the
|
12 |
-
* names of its contributors may be used to endorse or promote products
|
13 |
-
* derived from this software without specific prior written permission.
|
14 |
-
*
|
15 |
-
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
16 |
-
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17 |
-
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18 |
-
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
|
19 |
-
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
20 |
-
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
21 |
-
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
22 |
-
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
23 |
-
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
24 |
-
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
25 |
-
*
|
26 |
-
******************************************************************************/
|
27 |
-
#pragma once
|
28 |
-
|
29 |
-
#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
|
30 |
-
#include <thrust/detail/cstdint.h>
|
31 |
-
#include <thrust/detail/temporary_array.h>
|
32 |
-
#include <thrust/system/cuda/detail/util.h>
|
33 |
-
|
34 |
-
#include <thrust/system/cuda/detail/execution_policy.h>
|
35 |
-
#include <thrust/system/cuda/detail/util.h>
|
36 |
-
#include <thrust/system/cuda/detail/core/agent_launcher.h>
|
37 |
-
#include <thrust/system/cuda/detail/core/util.h>
|
38 |
-
#include <thrust/system/cuda/detail/par_to_seq.h>
|
39 |
-
#include <thrust/merge.h>
|
40 |
-
#include <thrust/extrema.h>
|
41 |
-
#include <thrust/pair.h>
|
42 |
-
#include <thrust/detail/mpl/math.h>
|
43 |
-
#include <thrust/distance.h>
|
44 |
-
|
45 |
-
|
46 |
-
namespace thrust
|
47 |
-
{
|
48 |
-
namespace cuda_cub {
|
49 |
-
|
50 |
-
namespace __merge {
|
51 |
-
|
52 |
-
template <class KeysIt1,
|
53 |
-
class KeysIt2,
|
54 |
-
class Size,
|
55 |
-
class BinaryPred>
|
56 |
-
Size THRUST_DEVICE_FUNCTION
|
57 |
-
merge_path(KeysIt1 keys1,
|
58 |
-
KeysIt2 keys2,
|
59 |
-
Size keys1_count,
|
60 |
-
Size keys2_count,
|
61 |
-
Size diag,
|
62 |
-
BinaryPred binary_pred)
|
63 |
-
{
|
64 |
-
typedef typename iterator_traits<KeysIt1>::value_type key1_type;
|
65 |
-
typedef typename iterator_traits<KeysIt2>::value_type key2_type;
|
66 |
-
|
67 |
-
Size keys1_begin = thrust::max<Size>(0, diag - keys2_count);
|
68 |
-
Size keys1_end = thrust::min<Size>(diag, keys1_count);
|
69 |
-
|
70 |
-
while (keys1_begin < keys1_end)
|
71 |
-
{
|
72 |
-
Size mid = (keys1_begin + keys1_end) >> 1;
|
73 |
-
key1_type key1 = keys1[mid];
|
74 |
-
key2_type key2 = keys2[diag - 1 - mid];
|
75 |
-
bool pred = binary_pred(key2, key1);
|
76 |
-
if (pred)
|
77 |
-
{
|
78 |
-
keys1_end = mid;
|
79 |
-
}
|
80 |
-
else
|
81 |
-
{
|
82 |
-
keys1_begin = mid+1;
|
83 |
-
}
|
84 |
-
}
|
85 |
-
return keys1_begin;
|
86 |
-
}
|
87 |
-
|
88 |
-
template <class It, class T2, class CompareOp, int ITEMS_PER_THREAD>
|
89 |
-
THRUST_DEVICE_FUNCTION void
|
90 |
-
serial_merge(It keys_shared,
|
91 |
-
int keys1_beg,
|
92 |
-
int keys2_beg,
|
93 |
-
int keys1_count,
|
94 |
-
int keys2_count,
|
95 |
-
T2 (&output)[ITEMS_PER_THREAD],
|
96 |
-
int (&indices)[ITEMS_PER_THREAD],
|
97 |
-
CompareOp compare_op)
|
98 |
-
{
|
99 |
-
int keys1_end = keys1_beg + keys1_count;
|
100 |
-
int keys2_end = keys2_beg + keys2_count;
|
101 |
-
|
102 |
-
typedef typename iterator_value<It>::type key_type;
|
103 |
-
|
104 |
-
key_type key1 = keys_shared[keys1_beg];
|
105 |
-
key_type key2 = keys_shared[keys2_beg];
|
106 |
-
|
107 |
-
|
108 |
-
#pragma unroll
|
109 |
-
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
|
110 |
-
{
|
111 |
-
bool p = (keys2_beg < keys2_end) &&
|
112 |
-
((keys1_beg >= keys1_end) ||
|
113 |
-
compare_op(key2,key1));
|
114 |
-
|
115 |
-
output[ITEM] = p ? key2 : key1;
|
116 |
-
indices[ITEM] = p ? keys2_beg++ : keys1_beg++;
|
117 |
-
|
118 |
-
if (p)
|
119 |
-
{
|
120 |
-
key2 = keys_shared[keys2_beg];
|
121 |
-
}
|
122 |
-
else
|
123 |
-
{
|
124 |
-
key1 = keys_shared[keys1_beg];
|
125 |
-
}
|
126 |
-
}
|
127 |
-
}
|
128 |
-
|
129 |
-
template <int _BLOCK_THREADS,
|
130 |
-
int _ITEMS_PER_THREAD = 1,
|
131 |
-
cub::BlockLoadAlgorithm _LOAD_ALGORITHM = cub::BLOCK_LOAD_DIRECT,
|
132 |
-
cub::CacheLoadModifier _LOAD_MODIFIER = cub::LOAD_LDG,
|
133 |
-
cub::BlockStoreAlgorithm _STORE_ALGORITHM = cub::BLOCK_STORE_DIRECT>
|
134 |
-
struct PtxPolicy
|
135 |
-
{
|
136 |
-
enum
|
137 |
-
{
|
138 |
-
BLOCK_THREADS = _BLOCK_THREADS,
|
139 |
-
ITEMS_PER_THREAD = _ITEMS_PER_THREAD,
|
140 |
-
ITEMS_PER_TILE = _BLOCK_THREADS * _ITEMS_PER_THREAD,
|
141 |
-
};
|
142 |
-
|
143 |
-
static const cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM;
|
144 |
-
static const cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER;
|
145 |
-
static const cub::BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM;
|
146 |
-
}; // PtxPolicy
|
147 |
-
|
148 |
-
template <class KeysIt1,
|
149 |
-
class KeysIt2,
|
150 |
-
class Size,
|
151 |
-
class CompareOp>
|
152 |
-
struct PartitionAgent
|
153 |
-
{
|
154 |
-
template <class Arch>
|
155 |
-
struct PtxPlan : PtxPolicy<256> {};
|
156 |
-
|
157 |
-
typedef core::specialize_plan<PtxPlan> ptx_plan;
|
158 |
-
|
159 |
-
THRUST_AGENT_ENTRY(KeysIt1 keys1,
|
160 |
-
KeysIt2 keys2,
|
161 |
-
Size keys1_count,
|
162 |
-
Size keys2_count,
|
163 |
-
Size num_partitions,
|
164 |
-
Size* merge_partitions,
|
165 |
-
CompareOp compare_op,
|
166 |
-
int items_per_tile,
|
167 |
-
char* /*shmem*/)
|
168 |
-
{
|
169 |
-
Size partition_idx = blockDim.x * blockIdx.x + threadIdx.x;
|
170 |
-
if (partition_idx < num_partitions)
|
171 |
-
{
|
172 |
-
Size partition_at = thrust::min(partition_idx * items_per_tile,
|
173 |
-
keys1_count + keys2_count);
|
174 |
-
Size partition_diag = merge_path(keys1,
|
175 |
-
keys2,
|
176 |
-
keys1_count,
|
177 |
-
keys2_count,
|
178 |
-
partition_at,
|
179 |
-
compare_op);
|
180 |
-
merge_partitions[partition_idx] = partition_diag;
|
181 |
-
}
|
182 |
-
}
|
183 |
-
}; // struct PartitionAgent
|
184 |
-
|
185 |
-
|
186 |
-
template <class Arch, class TSize>
|
187 |
-
struct Tuning;
|
188 |
-
|
189 |
-
namespace mpl = thrust::detail::mpl::math;
|
190 |
-
|
191 |
-
template<size_t NOMINAL_4B_ITEMS_PER_THREAD, size_t INPUT_SIZE>
|
192 |
-
struct items_per_thread
|
193 |
-
{
|
194 |
-
enum
|
195 |
-
{
|
196 |
-
ITEMS_PER_THREAD =
|
197 |
-
mpl::min<
|
198 |
-
int,
|
199 |
-
NOMINAL_4B_ITEMS_PER_THREAD,
|
200 |
-
mpl::max<
|
201 |
-
int,
|
202 |
-
1,
|
203 |
-
(NOMINAL_4B_ITEMS_PER_THREAD * 4 / INPUT_SIZE)>::value>::value,
|
204 |
-
value = mpl::is_odd<size_t, ITEMS_PER_THREAD>::value
|
205 |
-
? ITEMS_PER_THREAD
|
206 |
-
: ITEMS_PER_THREAD + 1
|
207 |
-
};
|
208 |
-
};
|
209 |
-
|
210 |
-
template<class TSize>
|
211 |
-
struct Tuning<sm30,TSize>
|
212 |
-
{
|
213 |
-
const static int INPUT_SIZE = TSize::value;
|
214 |
-
enum
|
215 |
-
{
|
216 |
-
NOMINAL_4B_ITEMS_PER_THREAD = 7,
|
217 |
-
ITEMS_PER_THREAD = items_per_thread<NOMINAL_4B_ITEMS_PER_THREAD,
|
218 |
-
INPUT_SIZE>::value
|
219 |
-
};
|
220 |
-
|
221 |
-
typedef PtxPolicy<128,
|
222 |
-
ITEMS_PER_THREAD,
|
223 |
-
cub::BLOCK_LOAD_WARP_TRANSPOSE,
|
224 |
-
cub::LOAD_DEFAULT,
|
225 |
-
cub::BLOCK_STORE_WARP_TRANSPOSE>
|
226 |
-
type;
|
227 |
-
}; // Tuning sm300
|
228 |
-
|
229 |
-
|
230 |
-
|
231 |
-
template<class TSize>
|
232 |
-
struct Tuning<sm60,TSize> : Tuning<sm30,TSize>
|
233 |
-
{
|
234 |
-
enum
|
235 |
-
{
|
236 |
-
NOMINAL_4B_ITEMS_PER_THREAD = 15,
|
237 |
-
ITEMS_PER_THREAD = items_per_thread<NOMINAL_4B_ITEMS_PER_THREAD,
|
238 |
-
Tuning::INPUT_SIZE>::value
|
239 |
-
};
|
240 |
-
|
241 |
-
|
242 |
-
typedef PtxPolicy<512,
|
243 |
-
ITEMS_PER_THREAD,
|
244 |
-
cub::BLOCK_LOAD_WARP_TRANSPOSE,
|
245 |
-
cub::LOAD_DEFAULT,
|
246 |
-
cub::BLOCK_STORE_WARP_TRANSPOSE>
|
247 |
-
type;
|
248 |
-
}; // Tuning sm52
|
249 |
-
|
250 |
-
template<class TSize>
|
251 |
-
struct Tuning<sm52,TSize> : Tuning<sm30,TSize>
|
252 |
-
{
|
253 |
-
enum
|
254 |
-
{
|
255 |
-
NOMINAL_4B_ITEMS_PER_THREAD = 13,
|
256 |
-
ITEMS_PER_THREAD = items_per_thread<NOMINAL_4B_ITEMS_PER_THREAD,
|
257 |
-
Tuning::INPUT_SIZE>::value
|
258 |
-
};
|
259 |
-
|
260 |
-
typedef PtxPolicy<512,
|
261 |
-
ITEMS_PER_THREAD,
|
262 |
-
cub::BLOCK_LOAD_WARP_TRANSPOSE,
|
263 |
-
cub::LOAD_LDG,
|
264 |
-
cub::BLOCK_STORE_WARP_TRANSPOSE>
|
265 |
-
type;
|
266 |
-
}; // Tuning sm52
|
267 |
-
|
268 |
-
template<class TSize>
|
269 |
-
struct Tuning<sm35,TSize> : Tuning<sm30,TSize>
|
270 |
-
{
|
271 |
-
const static int INPUT_SIZE = TSize::value;
|
272 |
-
enum
|
273 |
-
{
|
274 |
-
NOMINAL_4B_ITEMS_PER_THREAD = 11,
|
275 |
-
ITEMS_PER_THREAD = items_per_thread<NOMINAL_4B_ITEMS_PER_THREAD,
|
276 |
-
Tuning::INPUT_SIZE>::value
|
277 |
-
};
|
278 |
-
|
279 |
-
|
280 |
-
  typedef PtxPolicy<256,
                    ITEMS_PER_THREAD,
                    cub::BLOCK_LOAD_WARP_TRANSPOSE,
                    cub::LOAD_LDG,
                    cub::BLOCK_STORE_WARP_TRANSPOSE>
      type;
}; // Tuning sm350


template <size_t VALUE>
struct integer_constant : thrust::detail::integral_constant<size_t, VALUE> {};

template <class KeysIt1, class KeysIt2, class ItemsIt1, class ItemsIt2,
          class Size, class KeysOutputIt, class ItemsOutputIt,
          class CompareOp, class MERGE_ITEMS>
struct MergeAgent
{
  typedef typename iterator_traits<KeysIt1>::value_type key1_type;
  typedef typename iterator_traits<KeysIt2>::value_type key2_type;
  typedef typename iterator_traits<ItemsIt1>::value_type item1_type;
  typedef typename iterator_traits<ItemsIt2>::value_type item2_type;

  typedef key1_type key_type;
  typedef item1_type item_type;

  typedef typename thrust::detail::conditional<
      MERGE_ITEMS::value,
      integer_constant<sizeof(key_type) + sizeof(item_type)>,
      integer_constant<sizeof(key_type)> >::type tuning_type;


  template <class Arch>
  struct PtxPlan : Tuning<Arch, tuning_type>::type
  {
    typedef Tuning<Arch, tuning_type> tuning;

    typedef typename core::LoadIterator<PtxPlan, KeysIt1>::type  KeysLoadIt1;
    typedef typename core::LoadIterator<PtxPlan, KeysIt2>::type  KeysLoadIt2;
    typedef typename core::LoadIterator<PtxPlan, ItemsIt1>::type ItemsLoadIt1;
    typedef typename core::LoadIterator<PtxPlan, ItemsIt2>::type ItemsLoadIt2;

    typedef typename core::BlockLoad<PtxPlan, KeysLoadIt1>::type  BlockLoadKeys1;
    typedef typename core::BlockLoad<PtxPlan, KeysLoadIt2>::type  BlockLoadKeys2;
    typedef typename core::BlockLoad<PtxPlan, ItemsLoadIt1>::type BlockLoadItems1;
    typedef typename core::BlockLoad<PtxPlan, ItemsLoadIt2>::type BlockLoadItems2;

    typedef typename core::BlockStore<PtxPlan, KeysOutputIt, key_type>::type   BlockStoreKeys;
    typedef typename core::BlockStore<PtxPlan, ItemsOutputIt, item_type>::type BlockStoreItems;

    // gather required temporary storage in a union
    //
    union TempStorage
    {
      typename BlockLoadKeys1::TempStorage  load_keys1;
      typename BlockLoadKeys2::TempStorage  load_keys2;
      typename BlockLoadItems1::TempStorage load_items1;
      typename BlockLoadItems2::TempStorage load_items2;
      typename BlockStoreKeys::TempStorage  store_keys;
      typename BlockStoreItems::TempStorage store_items;

      core::uninitialized_array<item_type, PtxPlan::ITEMS_PER_TILE + 1> items_shared;
      core::uninitialized_array<key_type, PtxPlan::ITEMS_PER_TILE + 1>  keys_shared;
    }; // union TempStorage
  }; // struct PtxPlan

  typedef typename core::specialize_plan_msvc10_war<PtxPlan>::type::type ptx_plan;

  typedef typename ptx_plan::KeysLoadIt1     KeysLoadIt1;
  typedef typename ptx_plan::KeysLoadIt2     KeysLoadIt2;
  typedef typename ptx_plan::ItemsLoadIt1    ItemsLoadIt1;
  typedef typename ptx_plan::ItemsLoadIt2    ItemsLoadIt2;
  typedef typename ptx_plan::BlockLoadKeys1  BlockLoadKeys1;
  typedef typename ptx_plan::BlockLoadKeys2  BlockLoadKeys2;
  typedef typename ptx_plan::BlockLoadItems1 BlockLoadItems1;
  typedef typename ptx_plan::BlockLoadItems2 BlockLoadItems2;
  typedef typename ptx_plan::BlockStoreKeys  BlockStoreKeys;
  typedef typename ptx_plan::BlockStoreItems BlockStoreItems;
  typedef typename ptx_plan::TempStorage     TempStorage;

  enum
  {
    ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD,
    BLOCK_THREADS    = ptx_plan::BLOCK_THREADS,
    ITEMS_PER_TILE   = ptx_plan::ITEMS_PER_TILE
  };

  struct impl
  {
    //---------------------------------------------------------------------
    // Per thread data
    //---------------------------------------------------------------------

    TempStorage&  storage;
    KeysLoadIt1   keys1_in;
    KeysLoadIt2   keys2_in;
    ItemsLoadIt1  items1_in;
    ItemsLoadIt2  items2_in;
    Size          keys1_count;
    Size          keys2_count;
    KeysOutputIt  keys_out;
    ItemsOutputIt items_out;
    CompareOp     compare_op;
    Size*         merge_partitions;

    //---------------------------------------------------------------------
    // Utility functions
    //---------------------------------------------------------------------

    template <bool IS_FULL_TILE, class T, class It1, class It2>
    THRUST_DEVICE_FUNCTION void
    gmem_to_reg(T (&output)[ITEMS_PER_THREAD], It1 input1, It2 input2, int count1, int count2)
    {
      if (IS_FULL_TILE)
      {
#pragma unroll
        for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
        {
          int idx = BLOCK_THREADS * ITEM + threadIdx.x;
          if (idx < count1)
            output[ITEM] = input1[idx];
          else
            output[ITEM] = input2[idx - count1];
        }
      }
      else
      {
#pragma unroll
        for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
        {
          int idx = BLOCK_THREADS * ITEM + threadIdx.x;
          if (idx < count1 + count2)
          {
            if (idx < count1)
              output[ITEM] = input1[idx];
            else
              output[ITEM] = input2[idx - count1];
          }
        }
      }
    }

    template <class T, class It>
    THRUST_DEVICE_FUNCTION void
    reg_to_shared(It output, T (&input)[ITEMS_PER_THREAD])
    {
#pragma unroll
      for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
      {
        int idx = BLOCK_THREADS * ITEM + threadIdx.x;
        output[idx] = input[ITEM];
      }
    }

    //---------------------------------------------------------------------
    // Tile processing
    //---------------------------------------------------------------------

    template <bool IS_FULL_TILE>
    void THRUST_DEVICE_FUNCTION
    consume_tile(Size tile_idx, Size tile_base, int num_remaining)
    {
      using core::sync_threadblock;
      using core::uninitialized_array;

      Size partition_beg = merge_partitions[tile_idx + 0];
      Size partition_end = merge_partitions[tile_idx + 1];

      Size diag0 = ITEMS_PER_TILE * tile_idx;
      Size diag1 = thrust::min(keys1_count + keys2_count, diag0 + ITEMS_PER_TILE);

      // compute bounding box for keys1 & keys2
      //
      Size keys1_beg = partition_beg;
      Size keys1_end = partition_end;
      Size keys2_beg = diag0 - keys1_beg;
      Size keys2_end = diag1 - keys1_end;

      // number of keys per tile
      //
      int num_keys1 = static_cast<int>(keys1_end - keys1_beg);
      int num_keys2 = static_cast<int>(keys2_end - keys2_beg);

      key_type keys_loc[ITEMS_PER_THREAD];
      gmem_to_reg<IS_FULL_TILE>(keys_loc, keys1_in + keys1_beg, keys2_in + keys2_beg, num_keys1, num_keys2);
      reg_to_shared(&storage.keys_shared[0], keys_loc);

      sync_threadblock();

      // use binary search in shared memory
      // to find merge path for each of thread
      // we can use int type here, because the number of
      // items in shared memory is limited
      //
      int diag0_loc = min<int>(num_keys1 + num_keys2, ITEMS_PER_THREAD * threadIdx.x);

      int keys1_beg_loc = merge_path(&storage.keys_shared[0],
                                     &storage.keys_shared[num_keys1],
                                     num_keys1,
                                     num_keys2,
                                     diag0_loc,
                                     compare_op);
      int keys1_end_loc = num_keys1;
      int keys2_beg_loc = diag0_loc - keys1_beg_loc;
      int keys2_end_loc = num_keys2;

      int num_keys1_loc = keys1_end_loc - keys1_beg_loc;
      int num_keys2_loc = keys2_end_loc - keys2_beg_loc;

      // perform serial merge
      //
      int indices[ITEMS_PER_THREAD];

      serial_merge(&storage.keys_shared[0],
                   keys1_beg_loc,
                   keys2_beg_loc + num_keys1,
                   num_keys1_loc,
                   num_keys2_loc,
                   keys_loc,
                   indices,
                   compare_op);

      sync_threadblock();

      // write keys
      //
      if (IS_FULL_TILE)
      {
        BlockStoreKeys(storage.store_keys).Store(keys_out + tile_base, keys_loc);
      }
      else
      {
        BlockStoreKeys(storage.store_keys).Store(keys_out + tile_base, keys_loc, num_remaining);
      }

      // if items are provided, merge them
      if (MERGE_ITEMS::value)
      {
        item_type items_loc[ITEMS_PER_THREAD];
        gmem_to_reg<IS_FULL_TILE>(items_loc, items1_in + keys1_beg, items2_in + keys2_beg, num_keys1, num_keys2);

        sync_threadblock();

        reg_to_shared(&storage.items_shared[0], items_loc);

        sync_threadblock();

        // gather items from shared mem
        //
#pragma unroll
        for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
        {
          items_loc[ITEM] = storage.items_shared[indices[ITEM]];
        }

        sync_threadblock();

        // write form reg to gmem
        //
        if (IS_FULL_TILE)
        {
          BlockStoreItems(storage.store_items).Store(items_out + tile_base, items_loc);
        }
        else
        {
          BlockStoreItems(storage.store_items).Store(items_out + tile_base, items_loc, num_remaining);
        }
      }
    }

    //---------------------------------------------------------------------
    // Constructor
    //---------------------------------------------------------------------

    THRUST_DEVICE_FUNCTION
    impl(TempStorage&  storage_,
         KeysLoadIt1   keys1_in_,
         KeysLoadIt2   keys2_in_,
         ItemsLoadIt1  items1_in_,
         ItemsLoadIt2  items2_in_,
         Size          keys1_count_,
         Size          keys2_count_,
         KeysOutputIt  keys_out_,
         ItemsOutputIt items_out_,
         CompareOp     compare_op_,
         Size*         merge_partitions_)
        : storage(storage_),
          keys1_in(keys1_in_),
          keys2_in(keys2_in_),
          items1_in(items1_in_),
          items2_in(items2_in_),
          keys1_count(keys1_count_),
          keys2_count(keys2_count_),
          keys_out(keys_out_),
          items_out(items_out_),
          compare_op(compare_op_),
          merge_partitions(merge_partitions_)
    {
      // XXX with 8.5 chaging type to Size (or long long) results in error!
      int  tile_idx      = blockIdx.x;
      Size tile_base     = tile_idx * ITEMS_PER_TILE;
      int  items_in_tile = static_cast<int>(min<Size>(ITEMS_PER_TILE, keys1_count + keys2_count - tile_base));
      if (items_in_tile == ITEMS_PER_TILE)
      {
        // full tile
        consume_tile<true>(tile_idx, tile_base, ITEMS_PER_TILE);
      }
      else
      {
        // partial tile
        consume_tile<false>(tile_idx, tile_base, items_in_tile);
      }
    }
  }; // struct impl

  //---------------------------------------------------------------------
  // Agent entry point
  //---------------------------------------------------------------------

  THRUST_AGENT_ENTRY(KeysIt1       keys1_in,
                     KeysIt2       keys2_in,
                     ItemsIt1      items1_in,
                     ItemsIt2      items2_in,
                     Size          keys1_count,
                     Size          keys2_count,
                     KeysOutputIt  keys_out,
                     ItemsOutputIt items_out,
                     CompareOp     compare_op,
                     Size*         merge_partitions,
                     char*         shmem)
  {
    TempStorage& storage = *reinterpret_cast<TempStorage*>(shmem);

    impl(storage,
         core::make_load_iterator(ptx_plan(), keys1_in),
         core::make_load_iterator(ptx_plan(), keys2_in),
         core::make_load_iterator(ptx_plan(), items1_in),
         core::make_load_iterator(ptx_plan(), items2_in),
         keys1_count,
         keys2_count,
         keys_out,
         items_out,
         compare_op,
         merge_partitions);
  }
}; // struct MergeAgent;

//---------------------------------------------------------------------
// Two-step internal API
//---------------------------------------------------------------------

template <class MERGE_ITEMS, class KeysIt1, class KeysIt2, class ItemsIt1, class ItemsIt2,
          class Size, class KeysOutputIt, class ItemsOutputIt, class CompareOp>
cudaError_t THRUST_RUNTIME_FUNCTION
doit_step(void*         d_temp_storage,
          size_t&       temp_storage_bytes,
          KeysIt1       keys1,
          KeysIt2       keys2,
          ItemsIt1      items1,
          ItemsIt2      items2,
          Size          num_keys1,
          Size          num_keys2,
          KeysOutputIt  keys_result,
          ItemsOutputIt items_result,
          CompareOp     compare_op,
          cudaStream_t  stream,
          bool          debug_sync)
{
  if (num_keys1 + num_keys2 == 0)
    return cudaErrorNotSupported;

  using core::AgentPlan;
  using core::get_agent_plan;
  typedef core::AgentLauncher<
      MergeAgent<KeysIt1, KeysIt2, ItemsIt1, ItemsIt2, Size,
                 KeysOutputIt, ItemsOutputIt, CompareOp, MERGE_ITEMS> >
      merge_agent;

  typedef core::AgentLauncher<
      PartitionAgent<KeysIt1, KeysIt2, Size, CompareOp> >
      partition_agent;

  cudaError_t status = cudaSuccess;

  AgentPlan partition_plan = partition_agent::get_plan();
  AgentPlan merge_plan     = merge_agent::get_plan(stream);

  int  tile_size = merge_plan.items_per_tile;
  Size num_tiles = (num_keys1 + num_keys2 + tile_size - 1) / tile_size;

  size_t temp_storage1 = (1 + num_tiles) * sizeof(Size);
  size_t temp_storage2 = core::vshmem_size(merge_plan.shared_memory_size, num_tiles);

  void*  allocations[2]      = {NULL, NULL};
  size_t allocation_sizes[2] = {temp_storage1, temp_storage2};

  status = core::alias_storage(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes);
  CUDA_CUB_RET_IF_FAIL(status);

  if (d_temp_storage == NULL)
  {
    return status;
  }

  // partition data into work balanced tiles
  Size* merge_partitions = (Size*)allocations[0];
  char* vshmem_ptr       = temp_storage2 > 0 ? (char*)allocations[1] : NULL;

  {
    Size num_partitions = num_tiles + 1;

    partition_agent(partition_plan, num_partitions, stream, "partition agent", debug_sync)
        .launch(keys1, keys2, num_keys1, num_keys2, num_partitions,
                merge_partitions, compare_op, merge_plan.items_per_tile);
    CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
  }

  merge_agent(merge_plan, num_keys1 + num_keys2, stream, vshmem_ptr, "merge agent", debug_sync)
      .launch(keys1, keys2, items1, items2, num_keys1, num_keys2,
              keys_result, items_result, compare_op, merge_partitions);
  CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());

  return status;
}

template <typename MERGE_ITEMS, typename Derived, typename KeysIt1, typename KeysIt2,
          typename ItemsIt1, typename ItemsIt2, typename KeysOutputIt, typename ItemsOutputIt,
          typename CompareOp>
THRUST_RUNTIME_FUNCTION
pair<KeysOutputIt, ItemsOutputIt>
merge(execution_policy<Derived>& policy,
      KeysIt1       keys1_first,
      KeysIt1       keys1_last,
      KeysIt2       keys2_first,
      KeysIt2       keys2_last,
      ItemsIt1      items1_first,
      ItemsIt2      items2_first,
      KeysOutputIt  keys_result,
      ItemsOutputIt items_result,
      CompareOp     compare_op)
{
  typedef typename iterator_traits<KeysIt1>::difference_type size_type;

  size_type num_keys1
    = static_cast<size_type>(thrust::distance(keys1_first, keys1_last));
  size_type num_keys2
    = static_cast<size_type>(thrust::distance(keys2_first, keys2_last));

  size_type const count = num_keys1 + num_keys2;

  if (count == 0)
    return thrust::make_pair(keys_result, items_result);

  size_t       storage_size = 0;
  cudaStream_t stream       = cuda_cub::stream(policy);
  bool         debug_sync   = THRUST_DEBUG_SYNC_FLAG;

  cudaError_t status;
  status = doit_step<MERGE_ITEMS>(NULL, storage_size, keys1_first, keys2_first,
                                  items1_first, items2_first, num_keys1, num_keys2,
                                  keys_result, items_result, compare_op, stream, debug_sync);
  cuda_cub::throw_on_error(status, "merge: failed on 1st step");

  // Allocate temporary storage.
  thrust::detail::temporary_array<thrust::detail::uint8_t, Derived>
    tmp(policy, storage_size);
  void *ptr = static_cast<void*>(tmp.data().get());

  status = doit_step<MERGE_ITEMS>(ptr, storage_size, keys1_first, keys2_first,
                                  items1_first, items2_first, num_keys1, num_keys2,
                                  keys_result, items_result, compare_op, stream, debug_sync);
  cuda_cub::throw_on_error(status, "merge: failed on 2nd step");

  status = cuda_cub::synchronize(policy);
  cuda_cub::throw_on_error(status, "merge: failed to synchronize");

  return thrust::make_pair(keys_result + count, items_result + count);
}
} // namespace __merge


//-------------------------
// Thrust API entry points
//-------------------------


__thrust_exec_check_disable__
template <class Derived, class KeysIt1, class KeysIt2, class ResultIt, class CompareOp>
ResultIt __host__ __device__
merge(execution_policy<Derived>& policy,
      KeysIt1   keys1_first,
      KeysIt1   keys1_last,
      KeysIt2   keys2_first,
      KeysIt2   keys2_last,
      ResultIt  result,
      CompareOp compare_op)

{
  ResultIt ret = result;
  if (__THRUST_HAS_CUDART__)
  {
    typedef typename thrust::iterator_value<KeysIt1>::type keys_type;
    //
    keys_type* null_ = NULL;
    //
    ret = __merge::merge<thrust::detail::false_type>(policy,
                                                     keys1_first,
                                                     keys1_last,
                                                     keys2_first,
                                                     keys2_last,
                                                     null_,
                                                     null_,
                                                     result,
                                                     null_,
                                                     compare_op)
              .first;
  }
  else
  {
#if !__THRUST_HAS_CUDART__
    ret = thrust::merge(cvt_to_seq(derived_cast(policy)),
                        keys1_first, keys1_last,
                        keys2_first, keys2_last,
                        result, compare_op);
#endif
  }
  return ret;
}

template <class Derived, class KeysIt1, class KeysIt2, class ResultIt>
ResultIt __host__ __device__
merge(execution_policy<Derived>& policy,
      KeysIt1  keys1_first,
      KeysIt1  keys1_last,
      KeysIt2  keys2_first,
      KeysIt2  keys2_last,
      ResultIt result)
{
  typedef typename thrust::iterator_value<KeysIt1>::type keys_type;
  return cuda_cub::merge(policy,
                         keys1_first, keys1_last,
                         keys2_first, keys2_last,
                         result, less<keys_type>());
}

__thrust_exec_check_disable__
template <class Derived, class KeysIt1, class KeysIt2, class ItemsIt1, class ItemsIt2,
          class KeysOutputIt, class ItemsOutputIt, class CompareOp>
pair<KeysOutputIt, ItemsOutputIt> __host__ __device__
merge_by_key(execution_policy<Derived> &policy,
             KeysIt1       keys1_first,
             KeysIt1       keys1_last,
             KeysIt2       keys2_first,
             KeysIt2       keys2_last,
             ItemsIt1      items1_first,
             ItemsIt2      items2_first,
             KeysOutputIt  keys_result,
             ItemsOutputIt items_result,
             CompareOp     compare_op)
{
  pair<KeysOutputIt, ItemsOutputIt> ret = thrust::make_pair(keys_result, items_result);
  if (__THRUST_HAS_CUDART__)
  {
    return __merge::merge<thrust::detail::true_type>(policy,
                                                     keys1_first,
                                                     keys1_last,
                                                     keys2_first,
                                                     keys2_last,
                                                     items1_first,
                                                     items2_first,
                                                     keys_result,
                                                     items_result,
                                                     compare_op);
  }
  else
  {
#if !__THRUST_HAS_CUDART__
    ret = thrust::merge_by_key(cvt_to_seq(derived_cast(policy)),
                               keys1_first, keys1_last,
                               keys2_first, keys2_last,
                               items1_first, items2_first,
                               keys_result, items_result,
                               compare_op);
#endif
  }
  return ret;
}

template <class Derived, class KeysIt1, class KeysIt2, class ItemsIt1, class ItemsIt2,
          class KeysOutputIt, class ItemsOutputIt>
pair<KeysOutputIt, ItemsOutputIt> __host__ __device__
merge_by_key(execution_policy<Derived> &policy,
             KeysIt1       keys1_first,
             KeysIt1       keys1_last,
             KeysIt2       keys2_first,
             KeysIt2       keys2_last,
             ItemsIt1      items1_first,
             ItemsIt2      items2_first,
             KeysOutputIt  keys_result,
             ItemsOutputIt items_result)
{
  typedef typename thrust::iterator_value<KeysIt1>::type keys_type;
  return cuda_cub::merge_by_key(policy,
                                keys1_first, keys1_last,
                                keys2_first, keys2_last,
                                items1_first, items2_first,
                                keys_result, items_result,
                                thrust::less<keys_type>());
}


} // namespace cuda_cub
} // end namespace thrust
#endif
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/adjacent_difference.h
DELETED
@@ -1,74 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */


/*! \file adjacent_difference.h
 *  \brief Sequential implementation of adjacent_difference.
 */

#pragma once

#include <thrust/detail/config.h>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/system/detail/sequential/execution_policy.h>

namespace thrust
{
namespace system
{
namespace detail
{
namespace sequential
{


__thrust_exec_check_disable__
template<typename DerivedPolicy,
         typename InputIterator,
         typename OutputIterator,
         typename BinaryFunction>
__host__ __device__
OutputIterator adjacent_difference(sequential::execution_policy<DerivedPolicy> &,
                                   InputIterator first,
                                   InputIterator last,
                                   OutputIterator result,
                                   BinaryFunction binary_op)
{
  typedef typename thrust::iterator_traits<InputIterator>::value_type InputType;

  if(first == last)
    return result;

  InputType curr = *first;

  *result = curr;

  while(++first != last)
  {
    InputType next = *first;
    *(++result) = binary_op(next, curr);
    curr = next;
  }

  return ++result;
}


} // end namespace sequential
} // end namespace detail
} // end namespace system
} // end namespace thrust
spaces/CVPR/regionclip-demo/detectron2/layers/aspp.py
DELETED
@@ -1,144 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.

from copy import deepcopy
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F

from .batch_norm import get_norm
from .blocks import DepthwiseSeparableConv2d
from .wrappers import Conv2d


class ASPP(nn.Module):
    """
    Atrous Spatial Pyramid Pooling (ASPP).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        dilations,
        *,
        norm,
        activation,
        pool_kernel_size=None,
        dropout: float = 0.0,
        use_depthwise_separable_conv=False,
    ):
        """
        Args:
            in_channels (int): number of input channels for ASPP.
            out_channels (int): number of output channels.
            dilations (list): a list of 3 dilations in ASPP.
            norm (str or callable): normalization for all conv layers.
                See :func:`layers.get_norm` for supported format. norm is
                applied to all conv layers except the conv following
                global average pooling.
            activation (callable): activation function.
            pool_kernel_size (tuple, list): the average pooling size (kh, kw)
                for image pooling layer in ASPP. If set to None, it always
                performs global average pooling. If not None, it must be
                divisible by the shape of inputs in forward(). It is recommended
                to use a fixed input feature size in training, and set this
                option to match this size, so that it performs global average
                pooling in training, and the size of the pooling window stays
                consistent in inference.
            dropout (float): apply dropout on the output of ASPP. It is used in
                the official DeepLab implementation with a rate of 0.1:
                https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/model.py#L532  # noqa
            use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d
                for 3x3 convs in ASPP, proposed in :paper:`DeepLabV3+`.
        """
        super(ASPP, self).__init__()
        assert len(dilations) == 3, "ASPP expects 3 dilations, got {}".format(len(dilations))
        self.pool_kernel_size = pool_kernel_size
        self.dropout = dropout
        use_bias = norm == ""
        self.convs = nn.ModuleList()
        # conv 1x1
        self.convs.append(
            Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                bias=use_bias,
                norm=get_norm(norm, out_channels),
                activation=deepcopy(activation),
            )
        )
        weight_init.c2_xavier_fill(self.convs[-1])
        # atrous convs
        for dilation in dilations:
            if use_depthwise_separable_conv:
                self.convs.append(
                    DepthwiseSeparableConv2d(
                        in_channels,
                        out_channels,
                        kernel_size=3,
                        padding=dilation,
                        dilation=dilation,
                        norm1=norm,
                        activation1=deepcopy(activation),
                        norm2=norm,
                        activation2=deepcopy(activation),
                    )
                )
            else:
                self.convs.append(
                    Conv2d(
                        in_channels,
                        out_channels,
                        kernel_size=3,
                        padding=dilation,
                        dilation=dilation,
                        bias=use_bias,
                        norm=get_norm(norm, out_channels),
                        activation=deepcopy(activation),
                    )
                )
                weight_init.c2_xavier_fill(self.convs[-1])
        # image pooling
        # We do not add BatchNorm because the spatial resolution is 1x1,
        # the original TF implementation has BatchNorm.
        if pool_kernel_size is None:
            image_pooling = nn.Sequential(
                nn.AdaptiveAvgPool2d(1),
                Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
            )
        else:
            image_pooling = nn.Sequential(
                nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1),
                Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
            )
        weight_init.c2_xavier_fill(image_pooling[1])
        self.convs.append(image_pooling)

        self.project = Conv2d(
            5 * out_channels,
            out_channels,
            kernel_size=1,
            bias=use_bias,
            norm=get_norm(norm, out_channels),
            activation=deepcopy(activation),
        )
        weight_init.c2_xavier_fill(self.project)

    def forward(self, x):
        size = x.shape[-2:]
        if self.pool_kernel_size is not None:
            if size[0] % self.pool_kernel_size[0] or size[1] % self.pool_kernel_size[1]:
                raise ValueError(
                    "`pool_kernel_size` must be divisible by the shape of inputs. "
                    "Input size: {} `pool_kernel_size`: {}".format(size, self.pool_kernel_size)
                )
        res = []
        for conv in self.convs:
            res.append(conv(x))
        res[-1] = F.interpolate(res[-1], size=size, mode="bilinear", align_corners=False)
        res = torch.cat(res, dim=1)
        res = self.project(res)
        res = F.dropout(res, self.dropout, training=self.training) if self.dropout > 0 else res
        return res
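For context, a minimal sketch of how an ASPP layer like the one above is typically constructed and called; the channel sizes, dilations and import path are illustrative assumptions, not values taken from this repository:

# Hypothetical usage sketch for the ASPP module above (all values are assumptions).
import torch
from torch import nn
from detectron2.layers import ASPP  # assumes detectron2 is installed

aspp = ASPP(in_channels=256, out_channels=128, dilations=[6, 12, 18],
            norm="BN", activation=nn.ReLU())
features = torch.randn(1, 256, 32, 32)  # NCHW feature map
out = aspp(features)                    # expected shape: (1, 128, 32, 32)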
spaces/CXD200/QSign/Dockerfile
DELETED
@@ -1,15 +0,0 @@
# Project source code: https://github.com/fuqiuluo/unidbg-fetch-qsign

FROM openjdk:11.0-jdk

ENV TZ Asia/Shanghai

WORKDIR /app

COPY unidbg-fetch-qsign /app

CMD bash bin/unidbg-fetch-qsign --host=0.0.0.0 --port=7860 --count=5 --library=txlib --android_id=

EXPOSE 7860

# Recommended Hugging Face companion project: https://github.com/CikeyQi/QQsign_docs
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/MpoImagePlugin.py
DELETED
@@ -1,197 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# MPO file handling
#
# See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the
# Camera & Imaging Products Association)
#
# The multi-picture object combines multiple JPEG images (with a modified EXIF
# data format) into a single file. While it can theoretically be used much like
# a GIF animation, it is commonly used to represent 3D photographs and is (as
# of this writing) the most commonly used format by 3D cameras.
#
# History:
# 2014-03-13 Feneric   Created
#
# See the README file for information on usage and redistribution.
#

import itertools
import os
import struct

from . import (
    ExifTags,
    Image,
    ImageFile,
    ImageSequence,
    JpegImagePlugin,
    TiffImagePlugin,
)
from ._binary import i16be as i16
from ._binary import o32le

# def _accept(prefix):
#     return JpegImagePlugin._accept(prefix)


def _save(im, fp, filename):
    JpegImagePlugin._save(im, fp, filename)


def _save_all(im, fp, filename):
    append_images = im.encoderinfo.get("append_images", [])
    if not append_images:
        try:
            animated = im.is_animated
        except AttributeError:
            animated = False
        if not animated:
            _save(im, fp, filename)
            return

    mpf_offset = 28
    offsets = []
    for imSequence in itertools.chain([im], append_images):
        for im_frame in ImageSequence.Iterator(imSequence):
            if not offsets:
                # APP2 marker
                im_frame.encoderinfo["extra"] = (
                    b"\xFF\xE2" + struct.pack(">H", 6 + 82) + b"MPF\0" + b" " * 82
                )
                exif = im_frame.encoderinfo.get("exif")
                if isinstance(exif, Image.Exif):
                    exif = exif.tobytes()
                    im_frame.encoderinfo["exif"] = exif
                if exif:
                    mpf_offset += 4 + len(exif)

                JpegImagePlugin._save(im_frame, fp, filename)
                offsets.append(fp.tell())
            else:
                im_frame.save(fp, "JPEG")
                offsets.append(fp.tell() - offsets[-1])

    ifd = TiffImagePlugin.ImageFileDirectory_v2()
    ifd[0xB000] = b"0100"
    ifd[0xB001] = len(offsets)

    mpentries = b""
    data_offset = 0
    for i, size in enumerate(offsets):
        if i == 0:
            mptype = 0x030000  # Baseline MP Primary Image
        else:
            mptype = 0x000000  # Undefined
        mpentries += struct.pack("<LLLHH", mptype, size, data_offset, 0, 0)
        if i == 0:
            data_offset -= mpf_offset
        data_offset += size
    ifd[0xB002] = mpentries

    fp.seek(mpf_offset)
    fp.write(b"II\x2A\x00" + o32le(8) + ifd.tobytes(8))
    fp.seek(0, os.SEEK_END)


##
# Image plugin for MPO images.


class MpoImageFile(JpegImagePlugin.JpegImageFile):
    format = "MPO"
    format_description = "MPO (CIPA DC-007)"
    _close_exclusive_fp_after_loading = False

    def _open(self):
        self.fp.seek(0)  # prep the fp in order to pass the JPEG test
        JpegImagePlugin.JpegImageFile._open(self)
        self._after_jpeg_open()

    def _after_jpeg_open(self, mpheader=None):
        self._initial_size = self.size
        self.mpinfo = mpheader if mpheader is not None else self._getmp()
        self.n_frames = self.mpinfo[0xB001]
        self.__mpoffsets = [
            mpent["DataOffset"] + self.info["mpoffset"] for mpent in self.mpinfo[0xB002]
        ]
        self.__mpoffsets[0] = 0
        # Note that the following assertion will only be invalid if something
        # gets broken within JpegImagePlugin.
        assert self.n_frames == len(self.__mpoffsets)
        del self.info["mpoffset"]  # no longer needed
        self.is_animated = self.n_frames > 1
        self._fp = self.fp  # FIXME: hack
        self._fp.seek(self.__mpoffsets[0])  # get ready to read first frame
        self.__frame = 0
        self.offset = 0
        # for now we can only handle reading and individual frame extraction
        self.readonly = 1

    def load_seek(self, pos):
        self._fp.seek(pos)

    def seek(self, frame):
        if not self._seek_check(frame):
            return
        self.fp = self._fp
        self.offset = self.__mpoffsets[frame]

        self.fp.seek(self.offset + 2)  # skip SOI marker
        segment = self.fp.read(2)
        if not segment:
            msg = "No data found for frame"
            raise ValueError(msg)
        self._size = self._initial_size
        if i16(segment) == 0xFFE1:  # APP1
            n = i16(self.fp.read(2)) - 2
            self.info["exif"] = ImageFile._safe_read(self.fp, n)
            self._reload_exif()

            mptype = self.mpinfo[0xB002][frame]["Attribute"]["MPType"]
            if mptype.startswith("Large Thumbnail"):
                exif = self.getexif().get_ifd(ExifTags.IFD.Exif)
                if 40962 in exif and 40963 in exif:
                    self._size = (exif[40962], exif[40963])
        elif "exif" in self.info:
            del self.info["exif"]
            self._reload_exif()

        self.tile = [("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))]
        self.__frame = frame

    def tell(self):
        return self.__frame

    @staticmethod
    def adopt(jpeg_instance, mpheader=None):
        """
        Transform the instance of JpegImageFile into
        an instance of MpoImageFile.
        After the call, the JpegImageFile is extended
        to be an MpoImageFile.

        This is essentially useful when opening a JPEG
        file that reveals itself as an MPO, to avoid
        double call to _open.
        """
        jpeg_instance.__class__ = MpoImageFile
        jpeg_instance._after_jpeg_open(mpheader)
        return jpeg_instance


# ---------------------------------------------------------------------
# Registry stuff

# Note that since MPO shares a factory with JPEG, we do not need to do a
# separate registration for it here.
# Image.register_open(MpoImageFile.format,
#                     JpegImagePlugin.jpeg_factory, _accept)
Image.register_save(MpoImageFile.format, _save)
Image.register_save_all(MpoImageFile.format, _save_all)

Image.register_extension(MpoImageFile.format, ".mpo")

Image.register_mime(MpoImageFile.format, "image/mpo")
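A hedged sketch of how the MPO plugin above is normally exercised through Pillow's public API; the input file name and output paths are placeholders:

# Hypothetical usage sketch: reading frames from an MPO file via Pillow.
from PIL import Image, ImageSequence

with Image.open("example.mpo") as im:  # placeholder path
    print(im.format, im.n_frames)      # "MPO" and the number of embedded JPEG frames
    for i, frame in enumerate(ImageSequence.Iterator(im)):
        frame.convert("RGB").save(f"frame_{i}.jpg", "JPEG")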
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/pytest_plugin.py
DELETED
@@ -1,142 +0,0 @@
from __future__ import annotations

from contextlib import contextmanager
from inspect import isasyncgenfunction, iscoroutinefunction
from typing import Any, Dict, Generator, Tuple, cast

import pytest
import sniffio

from ._core._eventloop import get_all_backends, get_asynclib
from .abc import TestRunner

_current_runner: TestRunner | None = None


def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]:
    if isinstance(backend, str):
        return backend, {}
    elif isinstance(backend, tuple) and len(backend) == 2:
        if isinstance(backend[0], str) and isinstance(backend[1], dict):
            return cast(Tuple[str, Dict[str, Any]], backend)

    raise TypeError("anyio_backend must be either a string or tuple of (string, dict)")


@contextmanager
def get_runner(
    backend_name: str, backend_options: dict[str, Any]
) -> Generator[TestRunner, object, None]:
    global _current_runner
    if _current_runner:
        yield _current_runner
        return

    asynclib = get_asynclib(backend_name)
    token = None
    if sniffio.current_async_library_cvar.get(None) is None:
        # Since we're in control of the event loop, we can cache the name of the async library
        token = sniffio.current_async_library_cvar.set(backend_name)

    try:
        backend_options = backend_options or {}
        with asynclib.TestRunner(**backend_options) as runner:
            _current_runner = runner
            yield runner
    finally:
        _current_runner = None
        if token:
            sniffio.current_async_library_cvar.reset(token)


def pytest_configure(config: Any) -> None:
    config.addinivalue_line(
        "markers",
        "anyio: mark the (coroutine function) test to be run "
        "asynchronously via anyio.",
    )


def pytest_fixture_setup(fixturedef: Any, request: Any) -> None:
    def wrapper(*args, anyio_backend, **kwargs):  # type: ignore[no-untyped-def]
        backend_name, backend_options = extract_backend_and_options(anyio_backend)
        if has_backend_arg:
            kwargs["anyio_backend"] = anyio_backend

        with get_runner(backend_name, backend_options) as runner:
            if isasyncgenfunction(func):
                yield from runner.run_asyncgen_fixture(func, kwargs)
            else:
                yield runner.run_fixture(func, kwargs)

    # Only apply this to coroutine functions and async generator functions in requests that involve
    # the anyio_backend fixture
    func = fixturedef.func
    if isasyncgenfunction(func) or iscoroutinefunction(func):
        if "anyio_backend" in request.fixturenames:
            has_backend_arg = "anyio_backend" in fixturedef.argnames
            fixturedef.func = wrapper
            if not has_backend_arg:
                fixturedef.argnames += ("anyio_backend",)


@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None:
    if collector.istestfunction(obj, name):
        inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj
        if iscoroutinefunction(inner_func):
            marker = collector.get_closest_marker("anyio")
            own_markers = getattr(obj, "pytestmark", ())
            if marker or any(marker.name == "anyio" for marker in own_markers):
                pytest.mark.usefixtures("anyio_backend")(obj)


@pytest.hookimpl(tryfirst=True)
def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None:
    def run_with_hypothesis(**kwargs: Any) -> None:
        with get_runner(backend_name, backend_options) as runner:
            runner.run_test(original_func, kwargs)

    backend = pyfuncitem.funcargs.get("anyio_backend")
    if backend:
        backend_name, backend_options = extract_backend_and_options(backend)

        if hasattr(pyfuncitem.obj, "hypothesis"):
            # Wrap the inner test function unless it's already wrapped
            original_func = pyfuncitem.obj.hypothesis.inner_test
            if original_func.__qualname__ != run_with_hypothesis.__qualname__:
                if iscoroutinefunction(original_func):
                    pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis

            return None

        if iscoroutinefunction(pyfuncitem.obj):
            funcargs = pyfuncitem.funcargs
            testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
            with get_runner(backend_name, backend_options) as runner:
                runner.run_test(pyfuncitem.obj, testargs)

            return True

    return None


@pytest.fixture(params=get_all_backends())
def anyio_backend(request: Any) -> Any:
    return request.param


@pytest.fixture
def anyio_backend_name(anyio_backend: Any) -> str:
    if isinstance(anyio_backend, str):
        return anyio_backend
    else:
        return anyio_backend[0]


@pytest.fixture
def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]:
    if isinstance(anyio_backend, str):
        return {}
    else:
        return anyio_backend[1]
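As a hedged illustration, this is the kind of test module the plugin above is designed to collect and run; the test name is made up, and running it on the trio backend assumes trio is installed:

# Hypothetical test file exercising the anyio pytest plugin above.
import anyio
import pytest


@pytest.mark.anyio
async def test_sleep_completes(anyio_backend_name):
    # anyio_backend_name comes from the plugin's fixtures shown above.
    await anyio.sleep(0.01)
    assert anyio_backend_name in ("asyncio", "trio")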
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dotenv/ipython.py
DELETED
@@ -1,39 +0,0 @@
from IPython.core.magic import Magics, line_magic, magics_class  # type: ignore
from IPython.core.magic_arguments import (argument, magic_arguments,  # type: ignore
                                          parse_argstring)  # type: ignore

from .main import find_dotenv, load_dotenv


@magics_class
class IPythonDotEnv(Magics):

    @magic_arguments()
    @argument(
        '-o', '--override', action='store_true',
        help="Indicate to override existing variables"
    )
    @argument(
        '-v', '--verbose', action='store_true',
        help="Indicate function calls to be verbose"
    )
    @argument('dotenv_path', nargs='?', type=str, default='.env',
              help='Search in increasingly higher folders for the `dotenv_path`')
    @line_magic
    def dotenv(self, line):
        args = parse_argstring(self.dotenv, line)
        # Locate the .env file
        dotenv_path = args.dotenv_path
        try:
            dotenv_path = find_dotenv(dotenv_path, True, True)
        except IOError:
            print("cannot find .env file")
            return

        # Load the .env file
        load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)


def load_ipython_extension(ipython):
    """Register the %dotenv magic."""
    ipython.register_magics(IPythonDotEnv)
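For reference, a hedged sketch of how the magic registered above is used from an IPython session; the environment variable name is an assumed example, not a key from this project:

# Hypothetical IPython session using the %dotenv magic registered above.
# In [1]:
%load_ext dotenv
# In [2]: load a .env file found in the current or a parent directory, overriding existing vars
%dotenv -o -v
# In [3]:
import os
print(os.environ.get("MY_SETTING"))  # MY_SETTING is an assumed key defined in .env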
spaces/Dagfinn1962/stablediffusion-models/images.py
DELETED
@@ -1,22 +0,0 @@
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline

images = []
for img_path in sorted(glob.glob('brain.png'), reverse=True):
    images.append(mpimg.imread(img_path))

images = images[:15]

plt.figure(figsize=(20,10))

columns = 5
for i, image in enumerate(images):
    ax = plt.subplot(len(images) / columns + 1, columns, i + 1)
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
    ax.axis('off')
    plt.imshow(image)
gc.collect()
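The deleted snippet above is IPython-flavored (note the %matplotlib line, the gc call without an import, and the non-integer subplot row count); a hedged, plain-Python variant that would run as a standalone script might look like this:

# Hypothetical standalone rewrite of the thumbnail-grid snippet above.
import gc
import glob
import math

import matplotlib.image as mpimg
import matplotlib.pyplot as plt

images = [mpimg.imread(p) for p in sorted(glob.glob('brain.png'), reverse=True)][:15]

columns = 5
rows = math.ceil(len(images) / columns) if images else 1  # subplot rows must be an integer
plt.figure(figsize=(20, 10))
for i, image in enumerate(images):
    ax = plt.subplot(rows, columns, i + 1)
    ax.axis('off')
    plt.imshow(image)
plt.show()
gc.collect()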
spaces/DaleChen/AutoGPT/autogpt/commands/twitter.py
DELETED
@@ -1,26 +0,0 @@
import os

import tweepy
from dotenv import load_dotenv

load_dotenv()


def send_tweet(tweet_text):
    consumer_key = os.environ.get("TW_CONSUMER_KEY")
    consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
    access_token = os.environ.get("TW_ACCESS_TOKEN")
    access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET")
    # Authenticate to Twitter
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)

    # Create API object
    api = tweepy.API(auth)

    # Send tweet
    try:
        api.update_status(tweet_text)
        print("Tweet sent successfully!")
    except tweepy.TweepyException as e:
        print("Error sending tweet: {}".format(e.reason))
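A hedged usage sketch for the helper above; the import path is inferred from the deleted file's location, credentials are expected in the environment (or a .env file), and the tweet text is a placeholder:

# Hypothetical driver for send_tweet(); requires the TW_* variables to be set.
import os

from autogpt.commands.twitter import send_tweet  # path assumed from the deleted file's location

if all(os.environ.get(k) for k in (
    "TW_CONSUMER_KEY", "TW_CONSUMER_SECRET", "TW_ACCESS_TOKEN", "TW_ACCESS_TOKEN_SECRET",
)):
    send_tweet("Hello from AutoGPT!")  # placeholder text
else:
    print("Twitter credentials are not configured.")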
spaces/Detomo/ai-comic-generation/src/app/layouts/new_layouts.tsx
DELETED
@@ -1,273 +0,0 @@
|
|
1 |
-
"use client"
|
2 |
-
|
3 |
-
import { Panel } from "@/app/interface/panel"
|
4 |
-
import { pick } from "@/lib/pick"
|
5 |
-
import { Grid } from "@/app/interface/grid"
|
6 |
-
|
7 |
-
export function Layout1() {
|
8 |
-
return (
|
9 |
-
<Grid className="grid-cols-2 grid-rows-4">
|
10 |
-
<div className="bg-stone-100">
|
11 |
-
<Panel
|
12 |
-
panel={0}
|
13 |
-
width={1024}
|
14 |
-
height={768}
|
15 |
-
/>
|
16 |
-
</div>
|
17 |
-
<div className="bg-zinc-100 row-span-2">
|
18 |
-
<Panel
|
19 |
-
panel={1}
|
20 |
-
width={512}
|
21 |
-
height={1024}
|
22 |
-
/>
|
23 |
-
</div>
|
24 |
-
<div className="bg-gray-100 row-span-2 col-span-1">
|
25 |
-
<Panel
|
26 |
-
panel={2}
|
27 |
-
width={512}
|
28 |
-
height={1024}
|
29 |
-
/>
|
30 |
-
</div>
|
31 |
-
<div className="bg-slate-100">
|
32 |
-
<Panel
|
33 |
-
panel={3}
|
34 |
-
width={1024}
|
35 |
-
height={768}
|
36 |
-
/>
|
37 |
-
</div>
|
38 |
-
<div className="bg-slate-100 row-span-1 col-span-2">
|
39 |
-
<Panel
|
40 |
-
panel={4}
|
41 |
-
width={1024}
|
42 |
-
height={768}
|
43 |
-
/>
|
44 |
-
</div>
|
45 |
-
</Grid>
|
46 |
-
)
|
47 |
-
}
|
48 |
-
|
49 |
-
export function Layout2() {
|
50 |
-
return (
|
51 |
-
<Grid className="grid-cols-2 grid-rows-3">
|
52 |
-
<div className="bg-gray-100 row-span-2 col-span-1">
|
53 |
-
<Panel
|
54 |
-
panel={0}
|
55 |
-
width={768}
|
56 |
-
height={1024}
|
57 |
-
/>
|
58 |
-
</div>
|
59 |
-
<div className="bg-gray-100 row-span-1 col-span-1">
|
60 |
-
<Panel
|
61 |
-
panel={1}
|
62 |
-
width={1024}
|
63 |
-
height={1024}
|
64 |
-
/>
|
65 |
-
</div>
|
66 |
-
<div className="bg-slate-100">
|
67 |
-
<Panel
|
68 |
-
panel={2}
|
69 |
-
width={1024}
|
70 |
-
height={768}
|
71 |
-
/>
|
72 |
-
</div>
|
73 |
-
<div className="bg-stone-100">
|
74 |
-
<Panel
|
75 |
-
panel={3}
|
76 |
-
width={1024}
|
77 |
-
height={768}
|
78 |
-
/>
|
79 |
-
</div>
|
80 |
-
<div className="bg-zinc-100 row-span-1 col-span-1">
|
81 |
-
<Panel
|
82 |
-
panel={4}
|
83 |
-
width={1024}
|
84 |
-
height={768}
|
85 |
-
/>
|
86 |
-
</div>
|
87 |
-
</Grid>
|
88 |
-
)
|
89 |
-
}
|
90 |
-
|
91 |
-
export function Layout3() {
|
92 |
-
return (
|
93 |
-
<Grid className="grid-cols-5 grid-rows-2">
|
94 |
-
<div className="bg-zinc-100 col-span-3">
|
95 |
-
<Panel
|
96 |
-
panel={0}
|
97 |
-
width={1024}
|
98 |
-
height={1024}
|
99 |
-
/>
|
100 |
-
</div>
|
101 |
-
<div className="bg-gray-100 col-span-2 row-span-1">
|
102 |
-
<Panel
|
103 |
-
panel={1}
|
104 |
-
width={512}
|
105 |
-
height={1024}
|
106 |
-
/>
|
107 |
-
</div>
|
108 |
-
<div className="bg-gray-100 col-span-2 row-span-1">
|
109 |
-
<Panel
|
110 |
-
panel={2}
|
111 |
-
width={512}
|
112 |
-
height={1024}
|
113 |
-
/>
|
114 |
-
</div>
|
115 |
-
<div className="col-span-3 grid grid-cols-2 gap-2">
|
116 |
-
<div className="bg-stone-100">
|
117 |
-
<Panel
|
118 |
-
panel={3}
|
119 |
-
width={512}
|
120 |
-
height={1024}
|
121 |
-
/>
|
122 |
-
</div>
|
123 |
-
<div className="bg-slate-100">
|
124 |
-
<Panel
|
125 |
-
panel={4}
|
126 |
-
width={512}
|
127 |
-
height={1024}
|
128 |
-
/>
|
129 |
-
</div>
|
130 |
-
</div>
|
131 |
-
</Grid>
|
132 |
-
)
|
133 |
-
}
|
134 |
-
|
135 |
-
export function Layout4() {
|
136 |
-
return (
|
137 |
-
<Grid className="grid-cols-2 grid-rows-3">
|
138 |
-
<div className="bg-slate-100 row-span-2">
|
139 |
-
<Panel
|
140 |
-
panel={0}
|
141 |
-
width={768}
|
142 |
-
height={1024}
|
143 |
-
/>
|
144 |
-
</div>
|
145 |
-
<div className="bg-gray-100 row-span-1 col-span-1">
|
146 |
-
<Panel
|
147 |
-
panel={1}
|
148 |
-
width={1024}
|
149 |
-
height={768}
|
150 |
-
/>
|
151 |
-
</div>
|
152 |
-
<div className="bg-zinc-100 row-span-2">
|
153 |
-
<Panel
|
154 |
-
panel={2}
|
155 |
-
width={1024}
|
156 |
-
height={768}
|
157 |
-
/>
|
158 |
-
</div>
|
159 |
-
<div className="bg-stone-100">
|
160 |
-
<Panel
|
161 |
-
panel={3}
|
162 |
-
width={768}
|
163 |
-
height={1024}
|
164 |
-
/>
|
165 |
-
</div>
|
166 |
-
</Grid>
|
167 |
-
)
|
168 |
-
}
|
169 |
-
|
170 |
-
|
171 |
-
export function Layout5() {
|
172 |
-
return (
|
173 |
-
<Grid className="grid-cols-3 grid-rows-3">
|
174 |
-
<div className="bg-zinc-100 col-span-2 row-span-1">
|
175 |
-
<Panel
|
176 |
-
panel={0}
|
177 |
-
width={1024}
|
178 |
-
height={512}
|
179 |
-
/>
|
180 |
-
</div>
|
181 |
-
<div className="bg-zinc-100 col-span-1 row-span-1">
|
182 |
-
<Panel
|
183 |
-
panel={1}
|
184 |
-
width={1024}
|
185 |
-
height={768}
|
186 |
-
/>
|
187 |
-
</div>
|
188 |
-
<div className="bg-stone-100 row-span-1 col-span-1">
|
189 |
-
<Panel
|
190 |
-
panel={2}
|
191 |
-
width={768}
|
192 |
-
height={1024}
|
193 |
-
/>
|
194 |
-
</div>
|
195 |
-
<div className="bg-slate-100 row-span-1 col-span-2">
|
196 |
-
<Panel
|
197 |
-
panel={3}
|
198 |
-
width={1024}
|
199 |
-
height={768}
|
200 |
-
/>
|
201 |
-
</div>
|
202 |
-
<div className="bg-slate-100 row-span-1 col-span-3">
|
203 |
-
<Panel
|
204 |
-
panel={4}
|
205 |
-
width={1024}
|
206 |
-
height={1024}
|
207 |
-
/>
|
208 |
-
</div>
|
209 |
-
</Grid>
|
210 |
-
)
|
211 |
-
}
|
212 |
-
|
213 |
-
export function Layout6() {
|
214 |
-
return (
|
215 |
-
<Grid className="grid-cols-3 grid-rows-3">
|
216 |
-
<div className="bg-zinc-100 col-span-2 row-span-1">
|
217 |
-
<Panel
|
218 |
-
panel={0}
|
219 |
-
width={1024}
|
220 |
-
height={512}
|
221 |
-
/>
|
222 |
-
</div>
|
223 |
-
<div className="bg-zinc-100 col-span-1 row-span-1">
|
224 |
-
<Panel
|
225 |
-
panel={1}
|
226 |
-
width={768}
|
227 |
-
height={1024}
|
228 |
-
/>
|
229 |
-
</div>
|
230 |
-
<div className="bg-stone-100 row-span-1 col-span-1">
|
231 |
-
<Panel
|
232 |
-
panel={2}
|
233 |
-
width={768}
|
234 |
-
height={1024}
|
235 |
-
/>
|
236 |
-
</div>
|
237 |
-
<div className="bg-slate-100 row-span-2 col-span-2">
|
238 |
-
<Panel
|
239 |
-
panel={3}
|
240 |
-
width={1024}
|
241 |
-
height={1024}
|
242 |
-
/>
|
243 |
-
</div>
|
244 |
-
<div className="bg-slate-100 row-span-1 col-span-1">
|
245 |
-
<Panel
|
246 |
-
panel={3}
|
247 |
-
width={768}
|
248 |
-
height={1024}
|
249 |
-
/>
|
250 |
-
</div>
|
251 |
-
</Grid>
|
252 |
-
)
|
253 |
-
}
|
254 |
-
|
255 |
-
// export const layouts = { Layout1, Layout2, Layout3, Layout4, Layout5, Layout6 }
|
256 |
-
export const allLayouts = {
|
257 |
-
// Layout1,
|
258 |
-
// Layout2,
|
259 |
-
// Layout3,
|
260 |
-
// Layout4,
|
261 |
-
Layout5,
|
262 |
-
// Layout6
|
263 |
-
}
|
264 |
-
|
265 |
-
export type LayoutName = keyof typeof allLayouts
|
266 |
-
|
267 |
-
export function getRandomLayoutName(): LayoutName {
|
268 |
-
return pick(Object.keys(allLayouts) as LayoutName[]) as LayoutName
|
269 |
-
}
|
270 |
-
|
271 |
-
export function getRandomLayoutNames(): LayoutName[] {
|
272 |
-
return Object.keys(allLayouts).sort(() => Math.random() - 0.5) as LayoutName[]
|
273 |
-
}
spaces/Djacon/emotion_detection/static/analytics.html
DELETED
@@ -1,301 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-
-<head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <link rel="canonical" href="">
-    <title>Text2Feature | Analytics</title>
-    <link rel="stylesheet" href="files/css/main.css">
-    <link rel="icon" type="image/svg+xml" href="files/images/favicon.svg">
-    <script defer src="files/js/main.js"></script>
-
-    <script src="https://d3js.org/d3.v6.min.js"></script>
-    <script src="https://d3js.org/d3-fetch.v3.min.js"></script>
-</head>
-
-<body class="overflow-hidden">
-    <!-- Google tag (gtag.js) -->
-    <script async src="https://www.googletagmanager.com/gtag/js?id=G-B751Q3XBFC"></script>
-    <script>
-        window.dataLayer = window.dataLayer || [];
-        function gtag(){dataLayer.push(arguments);}
-        gtag('js', new Date());
-
-        gtag('config', 'G-B751Q3XBFC');
-    </script>
-
-    <div x-data="{ sidebarOpen: false }" class="relative flex h-screen text-gray-800 bg-white font-roboto">
-        <div x-cloak :class="sidebarOpen ? 'block' : 'hidden'" @click="sidebarOpen = false"
-            class="fixed inset-0 z-20 transition-opacity bg-black opacity-50 lg:hidden"></div>
-
-        <div x-cloak :class="sidebarOpen ? 'translate-x-0 ease-in' : '-translate-x-full ease-out'"
-            class="fixed inset-y-0 left-0 z-30 w-64 px-4 overflow-y-auto transition duration-200 transform bg-white border-r border-gray-100 lg:translate-x-0 lg:relative lg:inset-0 ">
-            <div class="mt-8">
-                <a href="/" class="flex items-center">
-                    <img class="w-auto h-8 no-invert" src="files/images/favicon.svg" alt="logo">
-                    <span class="mx-3 mt-1 font-medium text-lg">Text2<span class="no-invert" style="color: #ffa116">Feature</span></span>
-                </a>
-            </div>
-
-            <hr class="my-6 border-gray-100">
-
-            <nav class="space-y-8">
-                <div class="space-y-4">
-                    <h3 class="px-4 text-sm tracking-wider text-gray-400 uppercase">PAGES</h3>
-
-                    <a class="flex items-center px-4 py-2 text-gray-500 transition-colors duration-200 transform rounded-lg hover:text-gray-600 hover:bg-gray-100 bg-opacity-40"
-                        href="/">
-                        <svg xmlns="http://www.w3.org/2000/svg" class="w-6 h-6" fill="none" viewBox="0 0 24 24"
-                            stroke="currentColor">
-                            <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
-                                d="M3 12l2-2m0 0l7-7 7 7M5 10v10a1 1 0 001 1h3m10-11l2 2m-2-2v10a1 1 0 01-1 1h-3m-6 0a1 1 0 001-1v-4a1 1 0 011-1h2a1 1 0 011 1v4a1 1 0 001 1m-6 0h6" />
-                        </svg>
-                        <span class="mx-3 font-medium capitalize">Homepage</span>
-                    </a>
-
-                    <a class="flex items-center px-4 py-2 text-gray-500 transition-colors duration-300 transform rounded-lg hover:text-gray-600 hover:bg-gray-100 bg-opacity-40"
-                        href="text_summarizer">
-                        <svg xmlns="http://www.w3.org/2000/svg" class="w-6 h-6" fill="none" viewBox="0 0 24 24"
-                            stroke="currentColor" stroke-width="2">
-                            <path stroke-linecap="round" stroke-linejoin="round"
-                                d="M4 7v10c0 2.21 3.582 4 8 4s8-1.79 8-4V7M4 7c0 2.21 3.582 4 8 4s8-1.79 8-4M4 7c0-2.21 3.582-4 8-4s8 1.79 8 4m0 5c0 2.21-3.582 4-8 4s-8-1.79-8-4" />
-                        </svg>
-                        <span class="mx-3 font-medium capitalize">Text Summarizer</span>
-                    </a>
-
-                    <a class="flex items-center px-4 py-2 text-gray-500 transition-colors duration-300 transform rounded-lg hover:text-gray-600 hover:bg-gray-100 bg-opacity-40"
-                        href="emotion_detection">
-                        <svg width="24px" height="24px" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
-                            <path d="M9 16C9.85038 16.6303 10.8846 17 12 17C13.1154 17 14.1496 16.6303 15 16" stroke="#1C274C" stroke-width="1.5" stroke-linecap="round"/>
-                            <path d="M16 10.5C16 11.3284 15.5523 12 15 12C14.4477 12 14 11.3284 14 10.5C14 9.67157 14.4477 9 15 9C15.5523 9 16 9.67157 16 10.5Z" fill="#1C274C"/>
-                            <ellipse cx="9" cy="10.5" rx="1" ry="1.5" fill="#1C274C"/>
-                            <path d="M7 3.33782C8.47087 2.48697 10.1786 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22C6.47715 22 2 17.5228 2 12C2 10.1786 2.48697 8.47087 3.33782 7" stroke="#1C274C" stroke-width="1.5" stroke-linecap="round"/>
-                        </svg>
-                        <span class="mx-3 font-medium capitalize">Emotion Detection</span>
-                    </a>
-
-                    <a class="flex items-center px-4 py-2 text-gray-600 transition-colors duration-300 transform bg-gray-200 rounded-lg bg-opacity-40"
-                        href="javascript:void 0">
-                        <svg fill="currentColor" width="24px" height="24px" viewBox="0 0 32 32" version="1.1" xmlns="http://www.w3.org/2000/svg">
-                            <g id="SVGRepo_bgCarrier" stroke-width="0"></g>
-                            <g id="SVGRepo_tracerCarrier" stroke-linecap="round" stroke-linejoin="round"></g>
-                            <g id="SVGRepo_iconCarrier">
-                                <path d="M29.5 7c-1.381 0-2.5 1.12-2.5 2.5 0 0.284 0.058 0.551 0.144 0.805l-6.094 5.247c-0.427-0.341-0.961-0.553-1.55-0.553-0.68 0-1.294 0.273-1.744 0.713l-4.774-2.39c-0.093-1.296-1.162-2.323-2.482-2.323-1.38 0-2.5 1.12-2.5 2.5 0 0.378 0.090 0.732 0.24 1.053l-4.867 5.612c-0.273-0.102-0.564-0.166-0.873-0.166-1.381 0-2.5 1.119-2.5 2.5s1.119 2.5 2.5 2.5c1.381 0 2.5-1.119 2.5-2.5 0-0.332-0.068-0.649-0.186-0.939l4.946-5.685c0.236 0.073 0.48 0.124 0.74 0.124 0.727 0 1.377-0.316 1.834-0.813l4.669 2.341c0.017 1.367 1.127 2.471 2.497 2.471 1.381 0 2.5-1.119 2.5-2.5 0-0.044-0.011-0.086-0.013-0.13l6.503-5.587c0.309 0.137 0.649 0.216 1.010 0.216 1.381 0 2.5-1.119 2.5-2.5s-1.119-2.5-2.5-2.5z"></path>
-                            </g>
-                        </svg>
-                        <span class="mx-3 font-medium capitalize">Analytics</span>
-                    </a>
-
-                    <a class="flex items-center px-4 py-2 text-gray-500 transition-colors duration-300 transform rounded-lg hover:text-gray-600 hover:bg-gray-100 bg-opacity-40"
-                        href="">
-                        <svg class="w-6 h-6" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
-                            <path
-                                d="M13 10V14H19V10H13ZM11 10H5V14H11V10ZM13 19H19V16H13V19ZM11 19V16H5V19H11ZM13 5V8H19V5H13ZM11 5H5V8H11V5ZM19 3C20.1046 3 21 3.89543 21 5V19C21 20.1046 20.1046 21 19 21H5C3.89543 21 3 20.1046 3 19V5C3 3.89543 3.89543 3 5 3H19Z"
-                                fill="currentColor"></path>
-                        </svg>
-                        <span class="mx-3 font-medium capitalize">Project 4</span>
-                    </a>
-                </div>
-
-                <div class="space-y-4">
-                    <h3 class="px-4 text-sm tracking-wider text-gray-400 uppercase">OTHER</h3>
-
-                    <a class="flex items-center px-4 py-2 text-gray-500 transition-colors duration-300 transform rounded-lg hover:text-gray-600 hover:bg-gray-100 bg-opacity-40"
-                        href="https://djacon.github.io">
-                        <svg width="24" height="24" viewBox="0 0 512 512" xmlns="http://www.w3.org/2000/svg">
-                            <path
-                                d="M474.89,300.41a121.43,121.43,0,0,1-121.3,121.3H247.08V392.13H353.59a91.72,91.72,0,1,0,0-183.44H87.53L151,272.2l-20.92,20.92L30.89,193.9l99.22-99.22L151,115.6l-63.5,63.51H353.59A121.43,121.43,0,0,1,474.89,300.41Z" />
-                        </svg>
-                        <span class="mx-3 font-medium">Visit Main Website</span>
-                    </a>
-
-                    <button id="theme-btn" class="flex items-center px-4 py-2 text-gray-500 transition-colors duration-300 transform rounded-lg hover:text-gray-600 hover:bg-gray-100 bg-opacity-40">
-                        <img id="img-theme" src="files/images/sun.svg" width="24" height="24">
-                        <span id='theme-span' class="mx-3 font-medium">Set Light Theme</span>
-                    </button>
-                </div>
-            </nav>
-        </div>
-
-        <div class="flex flex-col flex-1 overflow-hidden bg-gray-100">
-            <header class="bg-white border-b border-gray-100">
-                <div class="flex items-center justify-between px-4 py-4 sm:px-6">
-                    <div class="flex items-center">
-                        <button @click="sidebarOpen = !sidebarOpen" class="text-gray-500 lg:hidden focus:outline-none">
-                            <svg class="w-6 h-6" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
-                                <path d="M4 6H20M4 12H20M4 18H11" stroke="currentColor" stroke-width="2"
-                                    stroke-linecap="round" stroke-linejoin="round" />
-                            </svg>
-                        </button>
-
-                        <div class="relative" x-data="{ search: '' }" @click.away="search = ''">
-                            <div class="relative mx-4 lg:mx-0">
-                                <span class="absolute inset-y-0 left-0 flex items-center pl-3">
-                                    <svg class="w-5 h-5 text-gray-400" viewBox="0 0 24 24" fill="none">
-                                        <path
-                                            d="M21 21L15 15M17 10C17 13.866 13.866 17 10 17C6.13401 17 3 13.866 3 10C3 6.13401 6.13401 3 10 3C13.866 3 17 6.13401 17 10Z"
-                                            stroke="currentColor" stroke-width="2" stroke-linecap="round"
-                                            stroke-linejoin="round"></path>
-                                    </svg>
-                                </span>
-
-                                <style>
-                                    #search:focus {
-                                        border: 1px solid #ffa116;
-                                    }
-                                </style>
-
-                                <input id="search" x-model="search" type="text"
-                                    class="w-44 h-10 py-2 pl-10 pr-4 text-gray-700 placeholder-gray-400 transition-all duration-150 bg-white border border-gray-200 rounded-md focus:w-80 sm:w-64 sm:focus:w-80 focus:outline-none focus:ring focus:ring-indigo-300 focus:ring-opacity-40"
-                                    placeholder="Find anything...">
-                            </div>
-
-                            <div class="absolute right-0 z-20 w-full py-2 mt-2 space-y-4 overflow-hidden bg-white rounded-md shadow-xl"
-                                x-show="search.length > 0" x-cloak
-                                x-transition:enter="transition ease-out duration-100 transform"
-                                x-transition:enter-start="opacity-0 scale-95"
-                                x-transition:enter-end="opacity-100 scale-100"
-                                x-transition:leave="transition ease-in duration-75 transform"
-                                x-transition:leave-start="opacity-100 scale-100"
-                                x-transition:leave-end="opacity-0 scale-95">
-
-                                <div>
-                                    <h3 class="px-5 text-xs tracking-wider text-gray-500 uppercase">Projects</h3>
-                                    <div class="mt-2">
-                                        <a class="block px-5 py-2 text-sm text-gray-700 capitalize transition-colors duration-200 transform sm:px-12 hover:text-gray-600 hover:bg-gray-100 bg-opacity-40"
-                                            href="text_summarizer">
-                                            Text Summarizer
-                                        </a>
-
-                                        <a class="block px-5 py-2 text-sm text-gray-700 capitalize transition-colors duration-200 transform sm:px-12 hover:text-gray-600 hover:bg-gray-100 bg-opacity-40"
-                                            href="emotion_detection">
-                                            Emotion Detection
-                                        </a>
-
-                                        <a class="block px-5 py-2 text-sm text-gray-700 capitalize transition-colors duration-200 transform sm:px-12 hover:text-gray-600 hover:bg-gray-100 bg-opacity-40"
-                                            href="javascript:void 0">
-                                            Analytics
-                                        </a>
-
-                                        <a class="block px-5 py-2 text-sm text-gray-700 capitalize transition-colors duration-200 transform sm:px-12 hover:text-gray-600 hover:bg-gray-100 bg-opacity-40"
-                                            href="">
-                                            Project 4
-                                        </a>
-                                    </div>
-                                </div>
-                                <div>
-                                    <h3 class="px-5 text-xs tracking-wider text-gray-500 uppercase">Other</h3>
-                                    <div class="mt-2">
-                                        <a class="block px-5 py-2 text-sm text-gray-700 capitalize transition-colors duration-200 transform sm:px-12 hover:text-gray-600 hover:bg-gray-100 bg-opacity-40"
-                                            href="https://djacon.github.io">
-                                            Visit Main Website
-                                        </a>
-                                    </div>
-                                </div>
-                            </div>
-                        </div>
-                    </div>
-
-                    <div class="flex items-center">
-                        <div x-data="{ dropdownOpen: false }" class="relative inline-block">
-                            <button @click="dropdownOpen = ! dropdownOpen" class="relative z-10 flex items-center flex-shrink-0 text-sm text-gray-600 focus:outline-none">
-                                <img class="flex-shrink-0 object-cover w-8 h-8 rounded-full" src="files/images/github-mark.svg" alt="github-mark">
-                            </button>
-
-                            <a href="https://github.com/Djacon/text2feature" target="_blank" class="absolute right-0 z-20 w-56 py-2 mt-2 overflow-hidden bg-white rounded-md shadow-xl rtl:right-auto rtl:left-0 hover:bg-gray-100" x-show="dropdownOpen" x-transition:enter="transition ease-out duration-100 transform" x-transition:enter-start="opacity-0 scale-95" x-transition:enter-end="opacity-100 scale-100" x-transition:leave="transition ease-in duration-75 transform" x-transition:leave-start="opacity-100 scale-100" x-transition:leave-end="opacity-0 scale-95" @click.away="dropdownOpen = false" style="display: none;">
-                                <div class="flex items-center p-3 -mt-2 text-sm text-gray-600 transition-colors duration-200 transform">
-                                    <img class="flex-shrink-0 object-cover mx-1 rounded-full w-9 h-9" src="files/images/github-mark.svg" alt="github-mark">
-                                    <div class="mx-1">
-                                        <h1 class="text-sm font-semibold text-gray-700">Made By Djacon</h1>
-                                        <p class="text-sm text-gray-500">github.com/Djacon</p>
-                                    </div>
-                                </div>
-                            </a>
-                        </div>
-                    </div>
-                </div>
-            </header>
-
-            <main class="flex-1 overflow-y-auto">
-                <div class="px-4 py-8 sm:px-6">
-                    <div>
-                        <h1 class="text-2xl font-medium text-gray-700 sm:text-3xl">Analytics</h1>
-
-                        <div class="hidden mt-3 overflow-y-auto text-sm lg:items-center lg:flex whitespace-nowrap">
-                            <a class="text-gray-600">Pages</a>
-                            <span class="mx-1 text-gray-500">/</span>
-                            <a href="javascript:void 0" class="text-indigo-600 hover:underline no-invert">Analytics</a>
-                        </div>
-                    </div>
-
-                    <div class="mt-6">
-                        <section class="mt-6 space-y-6">
-                            <div class="w-full p-4 bg-white xl:p-6">
-                                <div>
-                                    <label class="flex flex-col items-center justify-center w-full h-32 mt-2 text-gray-500 border-2 rounded-md cursor-pointer hover:text-gray-600 md:h-64">
-                                        <svg class="w-8 h-8" fill="currentColor" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20">
-                                            <path d="M16.88 9.1A4 4 0 0 1 16 17H5a5 5 0 0 1-1-9.9V7a3 3 0 0 1 4.52-2.59A4.98 4.98 0 0 1 17 8c0 .38-.04.74-.12 1.1zM11 11h3l-4-4-4 4h3v3h2v-3z"></path>
-                                        </svg>
-
-                                        <span class="mt-4">Import Data</span>
-                                        <input id="data" type="file" class="hidden" accept=".csv">
-                                    </label>
-                                </div>
-                                <div class="mt-4 flex justify-between no-invert">
-                                    <button id="submit" class="w-full flex items-center justify-center py-2 px-4 rounded font-medium text-white rounded-full" style="max-height: 2.5rem;">
-                                        Analize
-                                    </button>
-                                </div>
-                            </div>
-
-                            <div id="dropdown" class="hidden p-4 z-10 bg-white rounded-lg shadow w-full">
-                                <div class="p-3">
-                                    <button id="showDropdown" class="font-medium text-gray-700 text-lg inline-flex items-center">
-                                        Select Columns (0 of 0)
-                                        <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24"><path d="M12 15.41l-4.29-4.29a1.01 1.01 0 0 0-1.42 0 1.01 1.01 0 0 0 0 1.42l5 5a1.01 1.01 0 0 0 1.42 0l5-5a1.01 1.01 0 0 0 0-1.42 1.01 1.01 0 0 0-1.42 0L12 15.41z"/></svg>
-                                    </button>
-                                </div>
-                                <div id="dropdownSearch" style="display: none;">
-                                    <!-- <div class="p-3">
-                                        <label for="input-group-search text-lg"></label>
-                                        <div class="relative mt-3">
-                                            <div class="absolute inset-y-0 left-0 flex items-center pl-3 pointer-events-none">
-                                                <svg class="w-4 h-4 text-gray-500 dark:text-gray-400" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 20 20">
-                                                    <path stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="m19 19-4-4m0-7A7 7 0 1 1 1 8a7 7 0 0 1 14 0Z"/>
-                                                </svg>
-                                            </div>
-                                            <input type="text" id="input-group-search" class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full pl-10 p-2.5 dark:bg-gray-600 dark:border-gray-500 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500" placeholder="Search">
-                                        </div>
-                                    </div> -->
-                                    <ul id="ul-columns" class="h-48 px-3 pb-3 overflow-y-auto text-sm text-gray-700 dark:text-gray-200" aria-labelledby="dropdownSearchButton">
-                                        <li>
-                                            <div class="flex items-center p-2 rounded hover:bg-gray-100 dark:hover:bg-gray-600">
-                                                <input id="checkbox-item-main" type="checkbox" value="" class="w-4 h-4 text-blue-600 bg-gray-100 border-gray-300 rounded focus:ring-blue-500 dark:focus:ring-blue-600 dark:ring-offset-gray-700 dark:focus:ring-offset-gray-700 focus:ring-2 dark:bg-gray-600 dark:border-gray-500">
-                                                <label for="checkbox-item-main" class="w-full pl-3 text-sm font-medium text-gray-900 rounded dark:text-gray-300">All Columns</label>
-                                            </div>
-                                        </li>
-                                    </ul>
-                                    <div class="mt-4 flex justify-between no-invert">
-                                        <button id="cancel" class="w-32 sm:w-80 flex items-center justify-center py-2 px-4 rounded font-medium text-white rounded-full border-gray-100" style="background-color: white; color: black; border: 1px solid gray; max-height: 2.5rem;">
-                                            Cancel
-                                        </button>
-                                        <button id="apply" class="w-32 sm:w-80 flex items-center justify-center py-2 px-4 rounded font-medium text-white rounded-full" style="max-height: 2.5rem;">
-                                            Apply
-                                        </button>
-                                    </div>
-                                </div>
-                            </div>
-
-                            <div id="my-charts"></div>
-                        </section>
-                    </div>
-                </div>
-            </main>
-        </div>
-    </div>
-
-    <script src="files/js/theme.js"></script>
-    <script src="files/js/analytics.js"></script>
-</body>
-</html>
spaces/DragGan/DragGan-Inversion/PTI/models/e4e/latent_codes_pool.py
DELETED
@@ -1,55 +0,0 @@
-import random
-import torch
-
-
-class LatentCodesPool:
-    """This class implements latent codes buffer that stores previously generated w latent codes.
-    This buffer enables us to update discriminators using a history of generated w's
-    rather than the ones produced by the latest encoder.
-    """
-
-    def __init__(self, pool_size):
-        """Initialize the ImagePool class
-        Parameters:
-            pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
-        """
-        self.pool_size = pool_size
-        if self.pool_size > 0:  # create an empty pool
-            self.num_ws = 0
-            self.ws = []
-
-    def query(self, ws):
-        """Return w's from the pool.
-        Parameters:
-            ws: the latest generated w's from the generator
-        Returns w's from the buffer.
-        By 50/100, the buffer will return input w's.
-        By 50/100, the buffer will return w's previously stored in the buffer,
-        and insert the current w's to the buffer.
-        """
-        if self.pool_size == 0:  # if the buffer size is 0, do nothing
-            return ws
-        return_ws = []
-        for w in ws:  # ws.shape: (batch, 512) or (batch, n_latent, 512)
-            # w = torch.unsqueeze(image.data, 0)
-            if w.ndim == 2:
-                i = random.randint(0, len(w) - 1)  # apply a random latent index as a candidate
-                w = w[i]
-            self.handle_w(w, return_ws)
-        return_ws = torch.stack(return_ws, 0)  # collect all the images and return
-        return return_ws
-
-    def handle_w(self, w, return_ws):
-        if self.num_ws < self.pool_size:  # if the buffer is not full; keep inserting current codes to the buffer
-            self.num_ws = self.num_ws + 1
-            self.ws.append(w)
-            return_ws.append(w)
-        else:
-            p = random.uniform(0, 1)
-            if p > 0.5:  # by 50% chance, the buffer will return a previously stored latent code, and insert the current code into the buffer
-                random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
-                tmp = self.ws[random_id].clone()
-                self.ws[random_id] = w
-                return_ws.append(tmp)
-            else:  # by another 50% chance, the buffer will return the current image
-                return_ws.append(w)
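For context, a minimal usage sketch of the LatentCodesPool class removed above. This is a sketch under assumptions: the import path mirrors the deleted file's location and may differ in a given checkout, and the pool size and tensor shapes are illustrative (the shapes follow the comments in query).

import torch

# Assumed import path, mirroring the deleted file's location; adjust to your checkout.
from PTI.models.e4e.latent_codes_pool import LatentCodesPool

pool = LatentCodesPool(pool_size=50)   # buffer holding up to 50 previously generated w codes
ws = torch.randn(8, 512)               # a fresh batch of w latents, shape (batch, 512)
mixed = pool.query(ws)                 # mix of current codes and codes stored on earlier calls
assert mixed.shape == (8, 512)

Once the buffer is full, each incoming code is either returned unchanged or swapped with a randomly chosen stored code with equal probability, which is what gives the discriminator a history of generated latents rather than only the newest batch.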
spaces/Eddycrack864/Applio-Inference/julius/fftconv.py
DELETED
@@ -1,183 +0,0 @@
-# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
-# Author: adefossez, 2020
-
-"""
-Implementation of a FFT based 1D convolution in PyTorch.
-While FFT is used in CUDNN for small kernel sizes, it is not the case for long ones, e.g. 512.
-This module implements efficient FFT based convolutions for such convolutions. A typical
-application is for evaluationg FIR filters with a long receptive field, typically
-evaluated with a stride of 1.
-"""
-from typing import Optional
-
-import torch
-try:
-    import torch.fft as new_fft
-except ImportError:
-    new_fft = None  # type: ignore
-from torch.nn import functional as F
-
-from .core import pad_to, unfold
-from .utils import simple_repr
-
-
-# This is quite verbose, but sadly needed to make TorchScript happy.
-def _new_rfft(x: torch.Tensor):
-    z = new_fft.rfft(x, dim=-1)
-    return torch.view_as_real(z)
-
-
-def _old_rfft(x: torch.Tensor):
-    return torch.rfft(x, 1)  # type: ignore
-
-
-def _old_irfft(x: torch.Tensor, length: int):
-    result = torch.irfft(x, 1, signal_sizes=(length,))  # type: ignore
-    return result
-
-
-def _new_irfft(x: torch.Tensor, length: int):
-    x = torch.view_as_complex(x)
-    return new_fft.irfft(x, length, dim=-1)
-
-
-if new_fft is None:
-    _rfft = _old_rfft
-    _irfft = _old_irfft
-else:
-    _rfft = _new_rfft
-    _irfft = _new_irfft
-
-
-def _compl_mul_conjugate(a: torch.Tensor, b: torch.Tensor):
-    """
-    Given a and b two tensors of dimension 4
-    with the last dimension being the real and imaginary part,
-    returns a multiplied by the conjugate of b, the multiplication
-    being with respect to the second dimension.
-
-    """
-    # PyTorch 1.7 supports complex number, but not for all operations.
-    # Once the support is widespread, this can likely go away.
-
-    op = "bcft,dct->bdft"
-    return torch.stack([
-        torch.einsum(op, a[..., 0], b[..., 0]) + torch.einsum(op, a[..., 1], b[..., 1]),
-        torch.einsum(op, a[..., 1], b[..., 0]) - torch.einsum(op, a[..., 0], b[..., 1])
-    ],
-        dim=-1)
-
-
-def fft_conv1d(
-        input: torch.Tensor, weight: torch.Tensor,
-        bias: Optional[torch.Tensor] = None, stride: int = 1, padding: int = 0,
-        block_ratio: float = 5):
-    """
-    Same as `torch.nn.functional.conv1d` but using FFT for the convolution.
-    Please check PyTorch documentation for more information.
-
-    Args:
-        input (Tensor): input signal of shape `[B, C, T]`.
-        weight (Tensor): weight of the convolution `[D, C, K]` with `D` the number
-            of output channels.
-        bias (Tensor or None): if not None, bias term for the convolution.
-        stride (int): stride of convolution.
-        padding (int): padding to apply to the input.
-        block_ratio (float): can be tuned for speed. The input is splitted in chunks
-            with a size of `int(block_ratio * kernel_size)`.
-
-    Shape:
-
-        - Inputs: `input` is `[B, C, T]`, `weight` is `[D, C, K]` and bias is `[D]`.
-        - Output: `(*, T)`
-
-
-    ..note::
-        This function is faster than `torch.nn.functional.conv1d` only in specific cases.
-        Typically, the kernel size should be of the order of 256 to see any real gain,
-        for a stride of 1.
-
-    ..Warning::
-        Dilation and groups are not supported at the moment. This function might use
-        more memory than the default Conv1d implementation.
-    """
-    input = F.pad(input, (padding, padding))
-    batch, channels, length = input.shape
-    out_channels, _, kernel_size = weight.shape
-
-    if length < kernel_size:
-        raise RuntimeError(f"Input should be at least as large as the kernel size {kernel_size}, "
-                           f"but it is only {length} samples long.")
-    if block_ratio < 1:
-        raise RuntimeError("Block ratio must be greater than 1.")
-
-    # We are going to process the input blocks by blocks, as for some reason it is faster
-    # and less memory intensive (I think the culprit is `torch.einsum`.
-    block_size: int = min(int(kernel_size * block_ratio), length)
-    fold_stride = block_size - kernel_size + 1
-    weight = pad_to(weight, block_size)
-    weight_z = _rfft(weight)
-
-    # We pad the input and get the different frames, on which
-    frames = unfold(input, block_size, fold_stride)
-
-    frames_z = _rfft(frames)
-    out_z = _compl_mul_conjugate(frames_z, weight_z)
-    out = _irfft(out_z, block_size)
-    # The last bit is invalid, because FFT will do a circular convolution.
-    out = out[..., :-kernel_size + 1]
-    out = out.reshape(batch, out_channels, -1)
-    out = out[..., ::stride]
-    target_length = (length - kernel_size) // stride + 1
-    out = out[..., :target_length]
-    if bias is not None:
-        out += bias[:, None]
-    return out
-
-
-class FFTConv1d(torch.nn.Module):
-    """
-    Same as `torch.nn.Conv1d` but based on `fft_conv1d`.
-    Please check PyTorch documentation for more information.
-
-    Args:
-        in_channels (int): number of input channels.
-        out_channels (int): number of output channels.
-        kernel_size (int): kernel size of convolution.
-        stride (int): stride of convolution.
-        padding (int): padding to apply to the input.
-        bias (bool): if True, use a bias term.
-
-    ..note::
-        This module is faster than `torch.nn.Conv1d` only in specific cases.
-        Typically, `kernel_size` should be of the order of 256 to see any real gain,
-        for a stride of 1.
-
-    ..warning::
-        Dilation and groups are not supported at the moment. This module might use
-        more memory than the default Conv1d implementation.
-
-    >>> fftconv = FFTConv1d(12, 24, 128, 4)
-    >>> x = torch.randn(4, 12, 1024)
-    >>> print(list(fftconv(x).shape))
-    [4, 24, 225]
-    """
-    def __init__(self, in_channels: int, out_channels: int, kernel_size: int,
-                 stride: int = 1, padding: int = 0, bias: bool = True):
-        super().__init__()
-        self.in_channels = in_channels
-        self.out_channels = out_channels
-        self.kernel_size = kernel_size
-        self.stride = stride
-        self.padding = padding
-
-        conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size, bias=bias)
-        self.weight = conv.weight
-        self.bias = conv.bias
-
-    def forward(self, input: torch.Tensor):
-        return fft_conv1d(
-            input, self.weight, self.bias, self.stride, self.padding)
-
-    def __repr__(self):
-        return simple_repr(self, overrides={"bias": self.bias is not None})
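For context, a minimal sketch of how the removed fft_conv1d and FFTConv1d are meant to be used, based on their own docstrings. Assumptions: the import is from the upstream julius package rather than this vendored copy, and the tensor sizes are illustrative; the kernel size of 256 is picked because the docstring says the FFT path only pays off for long kernels at stride 1.

import torch
import torch.nn.functional as F
from julius.fftconv import fft_conv1d, FFTConv1d  # upstream package; the deleted file is a vendored copy

x = torch.randn(4, 12, 2048)   # input of shape [B, C, T]
w = torch.randn(24, 12, 256)   # weight of shape [D, C, K], long kernel
b = torch.randn(24)

ref = F.conv1d(x, w, b, stride=1, padding=0)
out = fft_conv1d(x, w, b, stride=1, padding=0)
print((ref - out).abs().max())   # small relative to the output scale (FFT round-off only)

module = FFTConv1d(12, 24, 256)  # drop-in module mirroring nn.Conv1d's basic arguments
print(list(module(x).shape))     # [4, 24, 1793], i.e. [B, D, T - K + 1]

Dilation and groups are not supported, so this only stands in for plain nn.Conv1d calls.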