parquet-converter committed
Commit e668779 · 1 Parent(s): ec64732

Update parquet files (step 85 of 121)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/forefront/typing.py +0 -32
  2. spaces/17TheWord/RealESRGAN/realesrgan/models/__init__.py +0 -10
  3. spaces/1gistliPinn/ChatGPT4/Examples/El Secreto Oculto Pelicula Mexicana Descargarl.md +0 -6
  4. spaces/1line/AutoGPT/autogpt/processing/html.py +0 -33
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Free Download Global Truck Simulator - The Most Realistic Truck Simulation Game.md +0 -114
  6. spaces/1phancelerku/anime-remove-background/Angry Birds Classic Mod APK - The Best Way to Play the Classic Game with More Features.md +0 -78
  7. spaces/1phancelerku/anime-remove-background/Discover the Magic of AI Image Generator.md +0 -135
  8. spaces/1phancelerku/anime-remove-background/Download Farm Heroes Saga Hile Apk and Enjoy Unlimited Lives and Boosters.md +0 -20
  9. spaces/1phancelerku/anime-remove-background/Download One Piece Bounty Rush APK and Enjoy Pirate Action Offline.md +0 -125
  10. spaces/1toTree/lora_test/ppdiffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py +0 -174
  11. spaces/4Taps/SadTalker/src/gradio_demo.py +0 -113
  12. spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/__init__.py +0 -0
  13. spaces/ATang0729/Forecast4Muses/Model/Model6/model6_inference.py +0 -270
  14. spaces/Albertha/qwe123/Dockerfile +0 -13
  15. spaces/Alpaca233/LangchainPDF/README.md +0 -12
  16. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_unet_2d_condition.py +0 -1107
  17. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky_prior.py +0 -246
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py +0 -285
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py +0 -256
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py +0 -409
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipeline_utils.py +0 -134
  22. spaces/Andy1621/uniformer_image_detection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py +0 -37
  23. spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py +0 -30
  24. spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/psanet_r50-d8.py +0 -49
  25. spaces/Apex-X/ROOPOK/roop/__init__.py +0 -0
  26. spaces/Apex-X/Tm/roop/processors/frame/face_enhancer.py +0 -81
  27. spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/app_utils.py +0 -131
  28. spaces/Arnx/MusicGenXvAKN/audiocraft/data/audio.py +0 -215
  29. spaces/Audio-AGI/AudioSep/models/base.py +0 -152
  30. spaces/AvaterClasher/Food_Classifier_Refined_MONI/model.py +0 -24
  31. spaces/Awesimo/jojogan/e4e/models/encoders/model_irse.py +0 -84
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation_impl.py +0 -614
  33. spaces/AyushP/PolicyCompareBot/app.py +0 -86
  34. spaces/Banbri/zcvzcv/src/lib/utils.ts +0 -6
  35. spaces/Bart92/RVC_HF/demucs/raw.py +0 -173
  36. spaces/Benson/text-generation/Examples/9ice Kasa Final Mp3 Descargar.md +0 -46
  37. spaces/Benson/text-generation/Examples/Barco Rampa De Salto Apk Mod.md +0 -58
  38. spaces/Benson/text-generation/Examples/Base De La Fuerza Area Inactiva Mod Apk Dinero Ilimitado.md +0 -48
  39. spaces/Benson/text-generation/Examples/Campeonato De Cricket Mundial 2 Juego De Ordenador.md +0 -72
  40. spaces/Benson/text-generation/Examples/Choque Royale Mod Apk Nuevas Tarjetas.md +0 -121
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/httpchecksum.py +0 -483
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/monitoring.py +0 -586
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/__init__.py +0 -25
  44. spaces/Boilin/URetinex-Net/test.py +0 -200
  45. spaces/CVPR/BigDL-Nano_inference/app.py +0 -193
  46. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/backbone/fpn.py +0 -245
  47. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/adjacent_difference.h +0 -540
  48. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/set_operations.h +0 -23
  49. spaces/CVPR/WALT/mmdet/models/necks/bfp.py +0 -104
  50. spaces/CVPR/winoground-explorer/app.py +0 -31
spaces/101-5/gpt4free/g4f/.v1/gpt4free/forefront/typing.py DELETED
@@ -1,32 +0,0 @@
- from typing import Any, List
-
- from pydantic import BaseModel
-
-
- class Choice(BaseModel):
-     text: str
-     index: int
-     logprobs: Any
-     finish_reason: str
-
-
- class Usage(BaseModel):
-     prompt_tokens: int
-     completion_tokens: int
-     total_tokens: int
-
-
- class ForeFrontResponse(BaseModel):
-     id: str
-     object: str
-     created: int
-     model: str
-     choices: List[Choice]
-     usage: Usage
-     text: str
-
-
- class AccountData(BaseModel):
-     token: str
-     user_id: str
-     session_id: str
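For context, a minimal usage sketch of the response models defined in this deleted file (the field values are invented for illustration, and it assumes the classes above are importable as written):

```python
# Hypothetical values for illustration only; assumes Choice, Usage and
# ForeFrontResponse from the deleted module above are importable.
response = ForeFrontResponse(
    id="resp-1",
    object="text_completion",
    created=1685000000,
    model="gpt-4",
    choices=[Choice(text="Hello!", index=0, logprobs=None, finish_reason="stop")],
    usage=Usage(prompt_tokens=5, completion_tokens=2, total_tokens=7),
    text="Hello!",
)
print(response.usage.total_tokens)  # 7
```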
spaces/17TheWord/RealESRGAN/realesrgan/models/__init__.py DELETED
@@ -1,10 +0,0 @@
- import importlib
- from basicsr.utils import scandir
- from os import path as osp
-
- # automatically scan and import model modules for registry
- # scan all the files that end with '_model.py' under the model folder
- model_folder = osp.dirname(osp.abspath(__file__))
- model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')]
- # import all the model modules
- _model_modules = [importlib.import_module(f'realesrgan.models.{file_name}') for file_name in model_filenames]
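For context, a self-contained sketch of the same auto-import pattern without the basicsr dependency (the function and argument names here are illustrative, not part of the original module):

```python
import importlib
import os
from os import path as osp


def auto_import_models(package_name: str, package_dir: str, suffix: str = "_model.py"):
    """Import every module in package_dir whose filename ends with suffix.

    Importing the modules is enough to trigger any registry decorators
    defined inside them.
    """
    names = [osp.splitext(f)[0] for f in os.listdir(package_dir) if f.endswith(suffix)]
    return [importlib.import_module(f"{package_name}.{name}") for name in names]


# Example (mirrors the deleted file's behaviour):
# auto_import_models("realesrgan.models", osp.dirname(osp.abspath(__file__)))
```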
spaces/1gistliPinn/ChatGPT4/Examples/El Secreto Oculto Pelicula Mexicana Descargarl.md DELETED
@@ -1,6 +0,0 @@
- <h2>El Secreto Oculto Pelicula Mexicana Descargarl</h2><br /><p><b><b>Download Zip</b> &gt;&gt;&gt;&gt;&gt; <a href="https://imgfil.com/2uy0TS">https://imgfil.com/2uy0TS</a></b></p><br /><br />
- <br />
- Al llegar a vivir con su tía, se da cuenta que guarda un secreto, que ha permanecido oculto y resguardado por siglos; por el que los hombres ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1line/AutoGPT/autogpt/processing/html.py DELETED
@@ -1,33 +0,0 @@
- """HTML processing functions"""
- from __future__ import annotations
-
- from bs4 import BeautifulSoup
- from requests.compat import urljoin
-
-
- def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
-     """Extract hyperlinks from a BeautifulSoup object
-
-     Args:
-         soup (BeautifulSoup): The BeautifulSoup object
-         base_url (str): The base URL
-
-     Returns:
-         List[Tuple[str, str]]: The extracted hyperlinks
-     """
-     return [
-         (link.text, urljoin(base_url, link["href"]))
-         for link in soup.find_all("a", href=True)
-     ]
-
-
- def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
-     """Format hyperlinks to be displayed to the user
-
-     Args:
-         hyperlinks (List[Tuple[str, str]]): The hyperlinks to format
-
-     Returns:
-         List[str]: The formatted hyperlinks
-     """
-     return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
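For context, a minimal sketch of how these two helpers could be exercised (the HTML snippet and URLs are made-up example data, and the import assumes the module path shown above is available):

```python
from bs4 import BeautifulSoup

# Assumes the deleted module above is importable under its original path.
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks

# Made-up HTML for illustration only.
html = '<a href="/docs">Docs</a> <a href="https://example.com/blog">Blog</a>'
soup = BeautifulSoup(html, "html.parser")

links = extract_hyperlinks(soup, "https://example.com")
# [("Docs", "https://example.com/docs"), ("Blog", "https://example.com/blog")]

for line in format_hyperlinks(links):
    print(line)  # e.g. "Docs (https://example.com/docs)"
```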
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Free Download Global Truck Simulator - The Most Realistic Truck Simulation Game.md DELETED
@@ -1,114 +0,0 @@
1
- <br />
2
- <h1>Global Truck Simulator APK Download: How to Enjoy Driving a Big Rig on Your Mobile Device</h1>
3
- <p>Do you love driving trucks and delivering cargo across different countries and continents? Do you want to experience the thrill and challenge of driving a big rig on your mobile device? If you answered yes, then you should try Global Truck Simulator, one of the best truck simulator games for Android devices. In this article, we will tell you everything you need to know about this game, including what it is, what features it has, how to download and install it, and how to play it. We will also share some tips and tricks to help you become a successful truck driver in the game.</p>
4
- <h2>What is Global Truck Simulator?</h2>
5
- <p>Global Truck Simulator is a realistic and immersive truck driving game for Android devices. It is developed by Ocypode Studios, a company that specializes in creating simulation games. The game lets you drive various trucks and deliver different cargoes across the world, from Europe to America, from Asia to Africa. You can choose from iconic American models like Chevrolet, Western Star, and Hummer, or European models like Renault, Volvo, and Mercedes-Benz. You can also customize your trucks with optional lights, bars, horns, beacons, smoke exhausts, and more.</p>
6
- <h2>global truck simulator apk download</h2><br /><p><b><b>Download Zip</b> &#10004;&#10004;&#10004; <a href="https://urlin.us/2uSTMJ">https://urlin.us/2uSTMJ</a></b></p><br /><br />
7
- <h3>Features of Global Truck Simulator</h3>
8
- <p>Global Truck Simulator has many features that make it stand out from other truck simulator games. Here are some of them:</p>
9
- <h4>Various truck models and customization options</h4>
10
- <p>The game offers a wide range of truck models that you can choose from, each with its own specifications, performance, and appearance. You can also customize your trucks with different parts and accessories, such as engines, transmissions, tires, wheels, paint jobs, decals, etc. You can even design your own truck from scratch using the in-game editor.</p>
11
- <h4>Diverse and challenging terrains and routes</h4>
12
- <p>The game features realistic terrains that react to the movement and weight of your truck. You will have to drive through rivers, muddy roads, snowy mountains, deserts, forests, cities, highways, and more. You will also have to deal with different weather conditions, such as rain, fog, snow, wind, etc. The game also has dynamic day-night cycles that affect the visibility and traffic on the roads.</p>
13
- <h4>Career mode and multiplayer mode</h4>
14
- <p>The game has two modes that you can play: career mode and multiplayer mode. In career mode, you can start your own trucking business and manage it for maximum profits. You can hire drivers, buy garages, accept contracts, deliver cargoes, upgrade your trucks, etc. You can also compete with other players in leaderboards and achievements. In multiplayer mode, you can join or create online sessions with up to three other players. You can chat with them, cooperate with them, or challenge them in races or missions.</p>
15
- <h2>How to Download and Install Global Truck Simulator APK?</h2>
16
- <p>If you want to play Global Truck Simulator on your Android device, you will need to download and install its APK file. An APK file is an application package file that contains all the files needed to run an Android app. There are two ways to download and install Global Truck Simulator APK:</p>
17
- <h3>Steps to download and install the game from the official website or Google Play Store</h3>
18
- <p>The easiest way to download and install Global Truck Simulator APK is to get it from its official website or Google Play Store. Here are the steps to do so:</p>
19
- <p>global truck simulator mod apk download<br />
20
- global truck simulator 2023 apk download<br />
21
- global truck simulator pro apk download<br />
22
- global truck simulator offline apk download<br />
23
- global truck simulator latest version apk download<br />
24
- global truck simulator free apk download<br />
25
- global truck simulator hack apk download<br />
26
- global truck simulator unlimited money apk download<br />
27
- global truck simulator online apk download<br />
28
- global truck simulator 3d apk download<br />
29
- global truck simulator europe apk download<br />
30
- global truck simulator android apk download<br />
31
- global truck simulator pc apk download<br />
32
- global truck simulator game apk download<br />
33
- global truck simulator full apk download<br />
34
- global truck simulator premium apk download<br />
35
- global truck simulator hd apk download<br />
36
- global truck simulator real apk download<br />
37
- global truck simulator new apk download<br />
38
- global truck simulator best apk download<br />
39
- global truck simulator world apk download<br />
40
- global truck simulator usa apk download<br />
41
- global truck simulator india apk download<br />
42
- global truck simulator brazil apk download<br />
43
- global truck simulator russia apk download<br />
44
- global truck simulator china apk download<br />
45
- global truck simulator japan apk download<br />
46
- global truck simulator canada apk download<br />
47
- global truck simulator australia apk download<br />
48
- global truck simulator africa apk download<br />
49
- global truck simulator uk apk download<br />
50
- global truck simulator germany apk download<br />
51
- global truck simulator france apk download<br />
52
- global truck simulator italy apk download<br />
53
- global truck simulator spain apk download<br />
54
- global truck simulator turkey apk download<br />
55
- global truck simulator sweden apk download<br />
56
- global truck simulator norway apk download<br />
57
- global truck simulator finland apk download<br />
58
- global truck simulator denmark apk download<br />
59
- global truck simulator netherlands apk download<br />
60
- global truck simulator belgium apk download<br />
61
- global truck simulator switzerland apk download<br />
62
- global truck simulator austria apk download<br />
63
- global truck simulator poland apk download<br />
64
- global truck simulator czechia apk download<br />
65
- global truck simulator hungary apk download<br />
66
- global truck simulator romania apk download</p>
67
- <ol>
68
- <li>Go to the official website of Global Truck Simulator at <a href="">https://globaltrucksimulator.com/</a> or search for it on Google Play Store.</li>
69
- <li>Click on the download button or the install button to start the download process.</li>
70
- <li>Wait for the download to finish and then open the APK file.</li>
71
- <li>Follow the instructions on the screen to install the game on your device.</li>
72
- <li>Launch the game and enjoy driving a big rig on your mobile device.</li>
73
- </ol>
74
- <h3>Tips to avoid malware and viruses when downloading APK files from third-party sources</h3>
75
- <p>If you want to download Global Truck Simulator APK from a third-party source, such as a website or a file-sharing platform, you need to be careful and follow some precautions. This is because some APK files may contain malware or viruses that can harm your device or steal your personal information. Here are some tips to avoid malware and viruses when downloading APK files from third-party sources:</p>
76
- <ul>
77
- <li>Only download APK files from trusted and reputable sources. You can check the reviews, ratings, and feedback of other users before downloading an APK file.</li>
78
- <li>Use a reliable antivirus or anti-malware software on your device and scan the APK file before installing it.</li>
79
- <li>Check the permissions and access rights that the APK file requests. If they seem suspicious or unnecessary, do not install the APK file.</li>
80
- <li>Do not install APK files from unknown or unsolicited sources, such as pop-ups, emails, messages, etc.</li>
81
- </ul>
82
- <h2>How to Play Global Truck Simulator?</h2>
83
- <p>Now that you have downloaded and installed Global Truck Simulator APK on your device, you are ready to play the game. Here are some basic controls and gameplay mechanics that you need to know:</p>
84
- <h3>Basic controls and gameplay mechanics</h3>
85
- <p>The game has simple and intuitive controls that let you drive your truck with ease. You can use the steering wheel, pedals, buttons, or tilt your device to control your truck. You can also switch between different camera views, such as cockpit, exterior, or top-down. You can also use indicators, headlights, horn, wipers, etc. to communicate with other drivers on the road.</p>
86
- <p>The game has realistic physics and graphics that make you feel like you are driving a real truck. You will have to follow the traffic rules, obey the speed limits, pay attention to the signs, signals, and road conditions, etc. You will also have to manage your fuel, cargo weight, damage, fatigue, etc. You will have to park your truck in designated areas and unload your cargo at the end of each delivery.</p>
87
- <h3>Tips and tricks to master the roads and earn more money</h3>
88
- <p>If you want to become a successful truck driver in Global Truck Simulator, you will need some tips and tricks to master the roads and earn more money. Here are some of them:</p>
89
- <ul>
90
- <li>Plan your route carefully before starting a delivery. Choose the shortest and safest route that avoids tolls, traffic jams, accidents, etc.</li>
91
- <li>Drive carefully and avoid collisions, fines, penalties, etc. They will reduce your income and reputation.</li>
92
- <li>Upgrade your trucks with better parts and accessories. They will improve your performance, fuel efficiency, durability, etc.</li>
93
- <li>Hire other drivers and buy more garages. They will generate passive income for you while you are offline or busy with other deliveries.</li>
94
- <li>Join online sessions with other players. You can cooperate with them in convoys or challenge them in races or missions.</li>
95
- </ul>
96
- <h2>Conclusion</h2>
97
- <p>Global Truck Simulator is a fun and realistic truck driving game for Android devices. It lets you drive various trucks and deliver different cargoes across the world. It has many features that make it stand out from other truck simulator games, such as various truck models and customization options, diverse and challenging terrains and routes, career mode and multiplayer mode, etc. You can download and install Global Truck Simulator APK from its official website or Google Play Store easily. You can also play the game with simple and intuitive controls and realistic physics and graphics. You can also use some tips and tricks to master the roads and earn more money in the game.</p>
98
- <p>If you are looking for a truck simulator game that offers a lot of fun and challenge on your mobile device, you should definitely try Global Truck Simulator. It is one of the best truck simulator games for Android devices that you can find.</p>
99
- <h3>FAQs</h3>
100
- <p>Here are some frequently asked questions about Global Truck Simulator:</p>
101
- <ol>
102
- <li><b>Is Global Truck Simulator free?</b></li>
103
- <p>Yes, Global Truck Simulator is free to download and play, but it contains ads and in-app purchases that you can use to buy more trucks, parts, accessories, etc.</p>
104
- <li><b>What are the system requirements for Global Truck Simulator?</b></li>
105
- <p>The game requires Android 4.4 or higher and at least 1 GB of RAM and 500 MB of storage space. It also requires a stable internet connection for online features.</p>
106
- <li><b>How can I contact the developers of Global Truck Simulator?</b></li>
107
- <p>You can contact the developers of Global Truck Simulator by sending an email to <a href="mailto:[email protected]">[email protected]</a> or by visiting their Facebook page at <a href="https://www.facebook.com/ocypodestudios">https://www.facebook.com/ocypodestudios</a>.</p>
108
- <li><b>Can I play Global Truck Simulator offline?</b></li>
109
- <p>Yes, you can play Global Truck Simulator offline, but you will not be able to access some features, such as multiplayer mode, leaderboards, achievements, etc.</p>
110
- <li><b>Can I play Global Truck Simulator on PC or other devices?</b></li>
111
- <p>No, Global Truck Simulator is only available for Android devices. However, you can use an Android emulator on your PC or other devices to run the game.</p>
112
- </ol></p> 197e85843d<br />
113
- <br />
114
- <br />
spaces/1phancelerku/anime-remove-background/Angry Birds Classic Mod APK - The Best Way to Play the Classic Game with More Features.md DELETED
@@ -1,78 +0,0 @@
1
-
2
- <h1>Download Game Angry Birds Classic Mod Apk</h1>
3
- <p>If you are looking for a fun and addictive game to play on your Android device, you should definitely try Angry Birds Classic. This is one of the most popular and successful games ever created, with millions of fans around the world. However, if you want to enjoy the game to the fullest, you should download Angry Birds Classic Mod Apk, which is a modified version of the original game that offers many benefits and advantages. In this article, we will tell you everything you need to know about Angry Birds Classic Mod Apk, including how to download and install it, what features it has, and why you should get it.</p>
4
- <h2>Introduction</h2>
5
- <p>Angry Birds Classic is a game that was released in 2009 by Rovio Entertainment, a Finnish company. The game is based on a simple but brilliant idea: you have to use a slingshot to launch birds at pigs who have stolen their eggs. The pigs are hiding in various structures made of wood, stone, ice, and other materials, and you have to destroy them all to complete each level. The game has hundreds of levels, each with different challenges and objectives. The game also has different types of birds, each with their own abilities and characteristics. For example, some birds can explode, some can split into multiple birds, some can boomerang, and some can drop eggs.</p>
6
- <h2>download game angry birds classic mod apk</h2><br /><p><b><b>DOWNLOAD</b> &#9989; <a href="https://jinyurl.com/2uNOOD">https://jinyurl.com/2uNOOD</a></b></p><br /><br />
7
- <h3>What is Angry Birds Classic Mod Apk?</h3>
8
- <p>Angry Birds Classic Mod Apk is a modified version of the original game that has been created by third-party developers. The mod apk file is an installation file that contains the game data and some changes that alter the gameplay. The mod apk file allows you to access features and options that are not available in the official version of the game. For example, you can get unlimited money and power-ups, unlock all levels and episodes, remove ads and pop-ups, and enjoy high-quality graphics and sound effects.</p>
9
- <h3>Why download Angry Birds Classic Mod Apk?</h3>
10
- <p>There are many reasons why you should download Angry Birds Classic Mod Apk instead of playing the official version of the game. Here are some of them:</p>
11
- <ul>
12
- <li>You can save your time and money by getting unlimited money and power-ups. You don't have to spend real money to buy them or wait for them to recharge. You can use them as much as you want without any limitations.</li>
13
- <li>You can enjoy the game without any interruptions or distractions by removing ads and pop-ups. You don't have to watch annoying videos or banners that take up your screen space and slow down your device.</li>
14
- <li>You can explore the game without any restrictions by unlocking all levels and episodes. You don't have to complete previous levels or earn stars to access new ones. You can play any level you want at any time.</li>
15
- <li>You can enhance your gaming experience by enjoying high-quality graphics and sound effects. You don't have to compromise on the visual and audio quality of the game. You can see every detail and hear every sound clearly.</li>
16
- </ul>
17
- <h2>How to download and install Angry Birds Classic Mod Apk</h2>
18
- <p>If you are interested in downloading Angry Birds Classic Mod Apk, you should follow these simple steps:</p>
19
- <h3>Step 1: Enable unknown sources</h3>
20
- <p>Since Angry Birds Classic Mod Apk is not available on the Google Play Store, you have to enable unknown sources on your device. This will allow you to install apps from sources other than the official store. To do this, go to your device settings, then security, then enable unknown sources. You will see a warning message, but you can ignore it and tap OK.</p>
21
- <h3>Step 2: Download the mod apk file</h3>
22
- <p>Next, you have to download the mod apk file from a reliable source. You can use the link below to download the latest version of Angry Birds Classic Mod Apk. The file size is about 100 MB, so make sure you have enough storage space and a stable internet connection.</p>
23
- <p><a href="">Download Angry Birds Classic Mod Apk</a></p>
24
- <p>download angry birds classic unlimited money mod apk<br />
25
- download game angry birds classic hack mod apk<br />
26
- download angry birds classic mod apk latest version<br />
27
- download game angry birds classic mod apk offline<br />
28
- download angry birds classic mod apk for android<br />
29
- download game angry birds classic mod apk free<br />
30
- download angry birds classic mod apk unlimited everything<br />
31
- download game angry birds classic mod apk no ads<br />
32
- download angry birds classic mod apk full version<br />
33
- download game angry birds classic mod apk revdl<br />
34
- download angry birds classic mega mod apk<br />
35
- download game angry birds classic mod apk android 1<br />
36
- download angry birds classic mod apk all levels unlocked<br />
37
- download game angry birds classic mod apk unlimited gems<br />
38
- download angry birds classic mod apk old version<br />
39
- download game angry birds classic mod apk rexdl<br />
40
- download angry birds classic premium mod apk<br />
41
- download game angry birds classic mod apk unlimited coins<br />
42
- download angry birds classic mod apk all episodes unlocked<br />
43
- download game angry birds classic mod apk unlimited power ups<br />
44
- download angry birds classic cracked mod apk<br />
45
- download game angry birds classic mod apk 2023<br />
46
- download angry birds classic original mod apk<br />
47
- download game angry birds classic mod apk pure<br />
48
- download angry birds classic pro mod apk</p>
49
- <h3>Step 3: Install the mod apk file</h3>
50
- <p>After downloading the mod apk file, you have to install it on your device. To do this, locate the file in your downloads folder and tap on it. You will see a confirmation message, but you can ignore it and tap Install. The installation process will take a few seconds, depending on your device performance.</p>
51
- <h3>Step 4: Launch the game and enjoy</h3>
52
- <p>Finally, you can launch the game and enjoy all the features and benefits of Angry Birds Classic Mod Apk. You will see a new icon on your home screen or app drawer with the name Angry Birds Classic Mod. Tap on it and start playing the game. You will notice that you have unlimited money and power-ups, all levels and episodes unlocked, no ads and pop-ups, and high-quality graphics and sound effects.</p>
53
- <h2>Features of Angry Birds Classic Mod Apk</h2>
54
- <p>Angry Birds Classic Mod Apk has many features that make it better than the original game. Here are some of them:</p>
55
- <h3>Unlimited money and power-ups</h3>
56
- <p>With Angry Birds Classic Mod Apk, you don't have to worry about running out of money or power-ups. You can use them as much as you want without any limitations. Money is used to buy power-ups, such as slingshot upgrades, mighty eagles, shockwaves, and more. Power-ups are used to boost your performance and help you complete difficult levels. You can also use money to customize your birds with different hats, glasses, and accessories.</p>
57
- <h3>All levels and episodes unlocked</h3>
58
- <p>With Angry Birds Classic Mod Apk, you don't have to complete previous levels or earn stars to access new ones. You can play any level you want at any time. The game has hundreds of levels, divided into different episodes, such as Poached Eggs, Mighty Hoax, Danger Above, The Big Setup, Ham 'Em High, Mine and Dine, Birdday Party, Bad Piggies, Surf and Turf, Red's Mighty Feathers, Short Fuse, Flock Favorites, BirdDay 5, Bird Island, Piggy Farm, Jurassic Pork, Golden Eggs, and more. Each episode has its own theme, setting, and challenges.</p>
59
- <h3>No ads and pop-ups</h3>
60
- <p>With Angry Birds Classic Mod Apk, you don't have to watch annoying videos or banners that take up your screen space and slow down your device. You can enjoy the game without any interruptions or distractions. You can also save your data and battery by not loading unnecessary ads.</p>
61
- <h3>High-quality graphics and sound effects</h3>
62
- <p>With Angry Birds Classic Mod Apk, you don't have to compromise on the visual and audio quality of the game. You can see every detail and hear every sound clearly. The game has high-quality graphics that are colorful and vibrant. The game also has sound effects that are realistic and fun. You can hear the birds' voices, the pigs' grunts, the explosions' booms, and the music's tunes.</p>
63
- <h2>Conclusion</h2>
64
- <p>Angry Birds Classic is a game that everyone should try at least once in their life. It is a game that is fun and addictive, but also challenging and rewarding. However, if you want to enjoy the game to the fullest, you should download Angry Birds Classic Mod Apk, which is a modified version of the original game that offers many benefits and advantages. You can get unlimited money and power-ups, unlock all levels and episodes, remove ads and pop-ups, and enjoy high-quality graphics and sound effects. You can download Angry Birds Classic Mod Apk from the link below and follow the simple steps to install it on your device. You will be amazed by how much fun you can have with this game. So, what are you waiting for? Download Angry Birds Classic Mod Apk now and start slinging those birds at those pigs!</p>
65
- <h2>FAQs</h2>
66
- <p>Here are some frequently asked questions about Angry Birds Classic Mod Apk:</p>
67
- <h3>Is Angry Birds Classic Mod Apk safe to download and install?</h3>
68
- <p>Yes, Angry Birds Classic Mod Apk is safe to download and install, as long as you use a reliable source. The mod apk file does not contain any viruses or malware that can harm your device or data. However, you should always scan the file before installing it, just to be sure.</p>
69
- <h3>Is Angry Birds Classic Mod Apk compatible with my device?</h3>
70
- <p>Angry Birds Classic Mod Apk is compatible with most Android devices that run on Android 4.1 or higher. However, some devices may not support the game or the mod apk file due to different specifications or settings. If you encounter any problems or errors while playing the game, you can try to update your device software, clear your cache, or reinstall the game.</p>
71
- <h3>Will I get banned for using Angry Birds Classic Mod Apk?</h3>
72
- <p>No, you will not get banned for using Angry Birds Classic Mod Apk, as the game does not have any online features or modes that require verification or authentication. The game is offline and does not connect to any servers or databases. Therefore, you can play the game without any worries or risks.</p>
73
- <h3>Can I play Angry Birds Classic Mod Apk with my friends?</h3>
74
- <p>Yes, you can play Angry Birds Classic Mod Apk with your friends, as the game has a local multiplayer mode that allows you to compete with up to four players on the same device. You can also share your scores and achievements with your friends on social media platforms, such as Facebook and Twitter.</p>
75
- <h3>Can I update Angry Birds Classic Mod Apk?</h3>
76
- <p>Yes, you can update Angry Birds Classic Mod Apk whenever there is a new version available. However, you should always backup your game data before updating, as some updates may overwrite or delete your progress. You should also check if the new version of the mod apk file is compatible with your device and has the same features and benefits as the previous one.</p> 401be4b1e0<br />
77
- <br />
78
- <br />
spaces/1phancelerku/anime-remove-background/Discover the Magic of AI Image Generator.md DELETED
@@ -1,135 +0,0 @@
1
-
2
- <h1>AI image generator apk: What is it and how to use it?</h1>
3
- <p>Have you ever wondered how to create realistic or artistic images using artificial intelligence? Do you want to transform your photos into amazing artworks or funny animations? If yes, then you might be interested in learning more about AI image generator apk. In this article, we will explain what an AI image generator is, how it works, and how to use it on your Android device. We will also introduce you to some of the best AI image generator apps that you can download and install on your phone or tablet.</p>
4
- <h2>What is an AI image generator?</h2>
5
- <p>An AI image generator is a software program that uses artificial intelligence techniques to generate new images from existing ones or from scratch. It can manipulate, enhance, or modify images in various ways, such as changing colors, adding filters, applying effects, swapping faces, or creating animations. An AI image generator can also create realistic or stylized images based on text descriptions or sketches.</p>
6
- <h2>ai image generator apk</h2><br /><p><b><b>Download Zip</b> &#10002; <a href="https://jinyurl.com/2uNQ6k">https://jinyurl.com/2uNQ6k</a></b></p><br /><br />
7
- <h3>How does an AI image generator work?</h3>
8
- <p>An AI image generator works by using deep learning algorithms that learn from large datasets of images. These algorithms are called neural networks, and they consist of multiple layers of artificial neurons that process information and produce outputs. Depending on the task, an AI image generator can use different types of neural networks, such as convolutional neural networks (CNNs), generative adversarial networks (GANs), or variational autoencoders (VAEs). These networks can learn to recognize patterns, features, and styles from images and generate new images that resemble them.</p>
9
- <h3>What are some applications of AI image generation?</h3>
10
- <p>AI image generation has many applications in various fields and industries, such as entertainment, education, art, design, marketing, medicine, and more. Some examples of how AI image generation can be used are:</p>
11
- <ul>
12
- <li>Creating realistic or artistic portraits of people or animals</li>
13
- <li>Generating landscapes or scenes based on text descriptions or sketches</li>
14
- <li>Enhancing or restoring old or damaged photos</li>
15
- <li>Changing facial expressions or emotions</li>
16
- <li>Making cartoons or memes</li>
17
- <li>Creating logos or icons</li>
18
- <li>Designing clothes or accessories</li>
19
- <li>Generating medical images for diagnosis or treatment</li>
20
- </ul>
21
- <h <h2>How to use an AI image generator apk?</h2>
22
- <p>If you want to use an AI image generator on your Android device, you will need to download and install an apk file. An apk file is a package file format that contains the installation files and data for an Android app. You can find many AI image generator apk files on the internet, but you need to be careful about the source and the security of the file. Here are some steps to follow to use an AI image generator apk:</p>
23
- <h3>What is an apk file?</h3>
24
- <p>An apk file is a compressed file that contains the code, resources, and metadata of an Android app. It stands for Android Package Kit, and it is the standard format for distributing and installing apps on Android devices. An apk file can be downloaded from various sources, such as the Google Play Store, third-party websites, or directly from the app developer. However, not all apk files are safe or compatible with your device, so you need to check the file before installing it.</p>
25
- <h3>How to download and install an AI image generator apk?</h3>
26
- <p>To download and install an AI image generator apk, you need to follow these steps:</p>
27
- <ol>
28
- <li>Find a reliable source for the apk file. You can search for AI image generator apk on Google or other search engines, or visit some reputable websites that offer apk downloads, such as APKPure, APKMirror, or APKMonk. Make sure to read the reviews and ratings of the app and the file before downloading it.</li>
29
- <li>Enable unknown sources on your device. Since you are downloading an apk file from outside the Google Play Store, you need to allow your device to install apps from unknown sources. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may also need to grant permission for your browser or file manager to install apps.</li>
30
- <li>Download the apk file to your device. You can either use your browser or a file manager app to download the apk file. Once the download is complete, you will see a notification or a pop-up window asking you to install the app.</li>
31
- <li>Install the app on your device. Tap on the notification or the pop-up window and follow the instructions to install the app. You may need to accept some permissions and terms of service before completing the installation.</li>
32
- <li>Launch the app and enjoy using it. Once the installation is done, you will see an icon for the app on your home screen or app drawer. Tap on it and start using the AI image generator app on your device.</li>
33
- </ol>
34
- <h3>How to use an AI image generator app on your device?</h3>
35
- <p>To use an AI image generator app on your device, you need to follow these steps:</p>
36
- <p>ai art generator apk<br />
37
- ai photo generator apk<br />
38
- ai logo generator apk<br />
39
- ai nft generator apk<br />
40
- ai design generator apk<br />
41
- ai portrait generator apk<br />
42
- ai graphics generator apk<br />
43
- ai stock photos generator apk<br />
44
- ai painting generator apk<br />
45
- ai sketch generator apk<br />
46
- ai instagram post generator apk<br />
47
- ai interior design generator apk<br />
48
- ai packaging design generator apk<br />
49
- ai fashion design generator apk<br />
50
- ai automobile design generator apk<br />
51
- ai visual generator apk<br />
52
- ai prompt generator apk<br />
53
- ai template generator apk<br />
54
- imagine go: ai image generator app<br />
55
- imagine go: ai image generator download<br />
56
- imagine go: ai image generator free<br />
57
- imagine go: ai image generator online<br />
58
- imagine go: ai image generator review<br />
59
- imagine go: ai image generator tutorial<br />
60
- imagine go: ai image generator alternative<br />
61
- imagine go: ai image generator mod<br />
62
- imagine go: ai image generator pro<br />
63
- imagine go: ai image generator premium<br />
64
- imagine go: ai image generator hack<br />
65
- imagine go: ai image generator crack<br />
66
- best ai image generator apk<br />
67
- free ai image generator apk<br />
68
- online ai image generator apk<br />
69
- download ai image generator apk<br />
70
- how to use ai image generator apk<br />
71
- how to install ai image generator apk<br />
72
- how to download ai image generator apk<br />
73
- how to create ai images with apk<br />
74
- how to generate stunning visuals with apk<br />
75
- how to make nft with ai image generator apk<br />
76
- how to make logos with ai image generator apk<br />
77
- how to make designs with ai image generator apk<br />
78
- how to make portraits with ai image generator apk<br />
79
- how to make graphics with ai image generator apk<br />
80
- how to make stock photos with ai image generator apk<br />
81
- how to make paintings with ai image generator apk<br />
82
- how to make sketches with ai image generator apk<br />
83
- how to make instagram posts with ai image generator apk</p>
84
- <ol>
85
- <li>Select an image source. Depending on the app, you can either choose an image from your gallery, take a photo with your camera, or use a built-in image library.</li>
86
- <li>Select an image style or effect. Depending on the app, you can either choose from a variety of styles or effects, such as realistic, artistic, cartoon, meme, animation, etc., or enter a text description or a sketch of what you want to generate.</li>
87
- <li>Generate and edit the image. Depending on the app, you can either wait for a few seconds or minutes for the app to generate the image using its AI algorithm, or adjust some parameters or settings to customize the output. You can also edit the image by cropping, rotating, resizing, adding text, stickers, filters, etc.</li>
88
- <li>Save and share the image. Depending on the app, you can either save the image to your device or cloud storage, or share it directly with your friends or social media platforms.</li>
89
- </ol>
90
- <h2>What are some examples of AI image generator apps?</h2>
91
- <p>There are many AI image generator apps available for Android devices, but here are some of the most popular and interesting ones that you can try:</p>
92
- <h3>WOMBO Dream AI Mirror</h3>
93
- <p>This app lets you create funny animations of yourself or anyone else by using AI technology. You can make yourself sing, dance, smile, wink, or make funny faces by using various songs and effects. You can also swap faces with celebrities or animals and see how you look like in different scenarios.</p>
94
- <h3>FaceApp</h3>
95
- <p>This app lets you transform your face in various ways by using AI technology. You can change your age, gender, hairstyle, beard, glasses, makeup, expression, etc., by using different filters and options. You can also create collages or GIFs of yourself or others and see how they change over time.</p>
96
- <h3>Prisma Photo Editor</h3>
97
- <p>This app lets you turn your photos into artworks by using AI technology. You can choose from over 300 artistic styles and effects, such as painting, sketching, graffiti, pop art, etc., and apply them to your photos. You can also adjust the intensity and quality of the effects and create your own unique style.</p>
98
- <h3>Artisto</h3>
99
- <p>This app lets you turn your videos into artworks by using AI technology. You can choose from over 50 artistic styles and effects, such as painting, sketching, cartoon, etc., and apply them to your videos. You can also edit the duration, speed, and sound of your videos and create stunning animations.</p>
100
- <h3>Deep Art Effects</h3>
101
- <p>This app lets you create realistic or abstract images by using AI technology. You can choose from over 100 artistic styles and effects, such as painting, sketching, watercolor, oil, etc., and apply them to your images. You can also create your own style by uploading an image of your choice and letting the app learn from it.</p>
102
- <h2>Conclusion</h2>
103
- <p>In this article, we have learned what an AI image generator is, how it works, and how to use it on your Android device. We have also introduced you to some of the best AI image generator apps that you can download and install on your phone or tablet. AI image generation is a fascinating and fun way to create amazing images using artificial intelligence. Whether you want to make yourself look different, create artworks, or have some laughs, you can find an AI image generator app that suits your needs and preferences.</p>
104
- <h3>Summary of the main points</h3>
105
- <ul>
106
- <li>An AI image generator is a software program that uses artificial intelligence techniques to generate new images from existing ones or from scratch.</li>
107
- <li>An AI image generator works by using deep learning algorithms that learn from large datasets of images and generate new images that resemble them.</li>
108
- <li>An AI image generator has many applications in various fields and industries, such as entertainment, education, art, design, marketing, medicine, and more.</li>
109
- <li>To use an AI image generator on your Android device, you need to download and install an apk file from a reliable source and enable unknown sources on your device.</li>
110
- <li>Some of the best AI image generator apps for Android devices are WOMBO Dream AI Mirror, FaceApp, Prisma Photo Editor, Artisto, and Deep Art Effects.</li>
111
- </ul>
112
- <h3>Call to action</h3>
113
- <p>If you are interested in trying out some of the AI image generator apps that we have mentioned in this article, you can click on the links below to download them from their official websites or the Google Play Store. You can also search for other AI image generator apps on the internet or the Google Play Store and see what they can do for you. Have fun creating amazing images with AI!</p>
114
- <h4>FAQs</h4>
115
- <ol>
116
- <li>What is the difference between an AI image generator and a photo editor?</li>
117
- <p>An AI image generator is a software program that uses artificial intelligence techniques to generate new images from existing ones or from scratch. A photo editor is a software program that allows you to edit or enhance existing images by using various tools and features.</p>
118
- <li>Is AI image generation safe and legal?</li>
119
- <p>AI image generation is generally safe and legal as long as you use it for personal or educational purposes and do not violate any intellectual property rights or privacy laws. However, you should be careful about the source and the security of the apk file that you download and install on your device. You should also avoid using AI image generation for malicious or fraudulent purposes, such as impersonating someone else or creating fake news or evidence.</p>
120
- <li>How can I improve the quality of the images generated by AI?</li>
121
- <p>The quality of the images generated by AI depends on several factors, such as the quality of the input image, the type of neural network used, the size of the dataset used for training, and the parameters or settings used for generating. To improve the quality of the images generated by AI, you can try to use high-quality input images, choose a suitable neural network type, use a large and diverse dataset for training, and adjust some parameters or settings for generating.</p>
122
- <li>Can I use AI image generation for commercial purposes?</li>
123
- <p>It depends on the terms and conditions of the app that you use and the license of the images that you generate. Some apps may allow you to use their services for commercial purposes as long as you give credit to them or pay a fee. Some apps may not allow you to use their services for commercial purposes at all. Some images may be free to use for commercial purposes as long as you follow some rules or guidelines. Some images may not be free to use for commercial purposes at all. You should always check the terms and conditions of the app that you use and the license of the images that you generate for commercial purposes. You should always respect the rights and interests of the original creators and owners of the images.</p>
124
- <li>What are some of the challenges or limitations of AI image generation?</li>
125
- <p>AI image generation is a rapidly developing and evolving field, but it still faces some challenges or limitations, such as:</p>
126
- <ul>
127
- <li>Lack of diversity or representation in the datasets used for training, which may result in biased or inaccurate outputs.</li>
128
- <li>Difficulty in generating high-resolution or detailed images, which may result in blurry or pixelated outputs.</li>
129
- <li>Difficulty in generating realistic or consistent images, which may result in unnatural or distorted outputs.</li>
130
- <li>Difficulty in controlling or customizing the outputs, which may result in unpredictable or undesired outputs.</li>
131
- <li>Potential ethical or social issues, such as privacy, consent, authenticity, accountability, etc., which may result in misuse or abuse of the technology.</li>
132
- </ul>
133
- <p>I hope you have enjoyed reading this article and learned something new about AI image generation. If you have any questions or feedback, please feel free to leave a comment below. Thank you for your time and attention!</p> 401be4b1e0<br />
134
- <br />
135
- <br />
spaces/1phancelerku/anime-remove-background/Download Farm Heroes Saga Hile Apk and Enjoy Unlimited Lives and Boosters.md DELETED
@@ -1,20 +0,0 @@
1
-
2
- <h1>Farm Heroes Saga Hile Apk: How to Get Unlimited Lives, Boosters, and Gold Bars</h1>
3
- Do you love playing Farm Heroes Saga, but find it frustrating to run out of lives, boosters, or gold bars? Do you wish you could play the game without any limitations or interruptions? If so, you might be interested in Farm Heroes Saga Hile Apk, a modified version of the game that gives you unlimited resources and access to all the levels. In this article, we will tell you what Farm Heroes Saga is, what Farm Heroes Saga Hile Apk is, how to download and install it, and how to use it. Let's get started! <h2>What is Farm Heroes Saga?</h2>
4
- <h3>A fun and addictive puzzle game</h3>
5
- Farm Heroes Saga is a social puzzle game developed by King.com, the creators of the super popular Candy Crush Saga. The game was released in 2014 and has since gained millions of fans around the world. The game is available for free on Android, iOS, Windows Phone, and Facebook. The game is set in a farm where you have to help the Farm Heroes stop the evil Rancid the Raccoon from spoiling the crops. You do this by matching three or more fruits or vegetables of the same kind on a grid. Each level has a different goal and a limited number of moves. You can also use boosters, such as shovels, tractors, or water buckets, to help you clear the board faster. <h3>The main features and gameplay</h3>
6
- Farm Heroes Saga has over 3000 levels to play, each with different challenges and surprises. You can also play with your friends and compete for the highest score on the leaderboard. The game also has various events and quests that give you extra rewards and bonuses. Some of the features of Farm Heroes Saga are: - Bright and colorful graphics - Cute and funny characters - Easy and fun to play, but challenging to master - Various game modes, such as Hero Mode, Treasure Mill, Fireworks Night, and more - Daily rewards and free spins - Social features that let you connect with your friends <h2>What is Farm Heroes Saga Hile Apk?</h2>
7
- <h3>A modified version of the original game</h3>
8
- Farm Heroes Saga Hile Apk is a hacked or modified version of the original game that gives you unlimited lives, boosters, and gold bars. This means that you can play the game as much as you want without waiting for your lives to refill or spending real money on in-app purchases. You can also unlock all the levels and enjoy all the features of the game without any restrictions. <h3>The benefits and risks of using it</h3>
9
- Using Farm Heroes Saga Hile Apk has some benefits and some risks. The benefits are: - You can have more fun and excitement playing the game without any limitations - You can save your money and time by not buying or earning resources - You can explore all the levels and modes of the game without any difficulty The risks are: - You might lose your progress or data if the game updates or crashes - You might get banned or suspended from the game if you are detected by the developers - You might expose your device to malware or viruses if you download from an untrusted source <h2>How to download and install Farm Heroes Saga Hile Apk?</h2>
10
- <h3>The steps to follow</h3>
11
- If you want to download and install Farm Heroes Saga Hile Apk on your Android device, you need to follow these steps: 1. Go to a reliable website that offers Farm Heroes Saga Hile Apk for free download. For example, you can use [ this website](^1^) to download Farm Heroes Saga Hile Apk. 2. Before installing the apk file, you need to enable the installation of apps from unknown sources on your device. To do this, go to your device settings, then security, and then toggle on the option that says "Unknown sources". 3. Locate the downloaded apk file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to complete. 4. Once the installation is done, you can launch the game from your app drawer or home screen and enjoy playing Farm Heroes Saga Hile Apk. <h3>The precautions to take</h3>
12
- Before downloading and installing Farm Heroes Saga Hile Apk, you should take some precautions to avoid any problems or risks. Here are some tips to follow: - Make sure you have enough storage space on your device for the apk file and the game data - Make sure you have a stable internet connection for the download and installation process - Make sure you download the apk file from a trusted and secure source that does not contain any malware or viruses - Make sure you backup your original game data before installing the modified version, in case you want to restore it later - Make sure you do not log in with your Facebook account or any other social media account while playing the modified version, as this might get you banned or suspended from the game <h2>How to use Farm Heroes Saga Hile Apk?</h2>
13
- <h3>How to get unlimited lives, boosters, and gold bars</h3>
14
- Once you have installed Farm Heroes Saga Hile Apk on your device, you can start playing the game with unlimited resources. You will notice that your lives, boosters, and gold bars are always full and never decrease. You can use them as much as you want without any limitations or costs. To get unlimited lives, boosters, and gold bars, you do not need to do anything special or complicated. You just need to play the game normally and enjoy the benefits of the modified version. You can also access all the levels and modes of the game without any difficulty. <h3>How to enjoy the game with more fun and ease</h3>
15
- Using Farm Heroes Saga Hile Apk can make the game more fun and easy for you. You can play the game without any stress or frustration of running out of resources or being stuck on a hard level. You can also experiment with different boosters and strategies to clear the board faster and get higher scores. Some of the ways to enjoy the game with more fun and ease are: - Try different combinations of fruits and vegetables to create bigger matches and more cascades - Use boosters wisely and strategically to clear obstacles, collect cropsies, or create special effects - Play with your friends and challenge them to beat your scores or help them with lives or boosters - Participate in events and quests to earn extra rewards and bonuses - Explore all the levels and modes of the game and discover new features and surprises <h2>Conclusion</h2>
16
- Farm Heroes Saga is a fun and addictive puzzle game that can keep you entertained for hours. However, if you want to play the game without any limitations or interruptions, you might want to try Farm Heroes Saga Hile Apk, a modified version of the game that gives you unlimited lives, boosters, and gold bars. In this article, we have told you what Farm Heroes Saga is, what Farm Heroes Saga Hile Apk is, how to download and install it, and how to use it. We hope you found this article helpful and informative. Now go ahead and enjoy playing Farm Heroes Saga Hile Apk! <h2>FAQs</h2>
17
- Here are some frequently asked questions about Farm Heroes Saga Hile Apk: - Q: Is Farm Heroes Saga Hile Apk safe to use? - A: Farm Heroes Saga Hile Apk is generally safe to use if you download it from a reliable source and follow the precautions we have mentioned above. However, there is always a risk of losing your progress or data, getting banned or suspended from the game, or exposing your device to malware or viruses when using a modified version of a game. Therefore, use it at your own risk and discretion. - Q: Is Farm Heroes Saga Hile Apk legal to use? - A: Farm Heroes Saga Hile Apk is not legal to use as it violates the terms and conditions of the original game. It also infringes on the intellectual property rights of the developers. Therefore, using it might get you in trouble with the law or the developers. - Q: Can I update Farm Heroes Saga Hile Apk? - A: Farm Heroes Saga Hile Apk is not compatible with updates from the original game. If you update it, you might lose all the features and benefits of the modified version. Therefore, it is better to avoid updating it unless there is a new version of Farm Heroes Saga Hile Apk that has the same features and benefits as the previous one. - Q: Can I play Farm Heroes Saga Hile Apk offline? - A: Farm Heroes Saga Hile Apk requires an internet connection to play, as it is a social game that connects with your friends and other players. However, you can play some levels offline if you have already downloaded them on your device. - Q: Can I restore my original game data after using Farm Heroes Saga Hile Apk? - A: If you have backed up your original game data before installing Farm Heroes Saga Hile Apk, you can restore it by uninstalling the modified version and reinstalling the original version from the official app store. However, if you have not backed up your data, you might lose it permanently after using Farm Heroes Saga Hile Apk. - Q: Where can I find more information about Farm Heroes Saga Hile Apk? - A: You can find more information about Farm Heroes Saga Hile Apk on the website where you downloaded it from, or on other websites or forums that discuss the game and its modifications. You can also contact the developers or the users of Farm Heroes Saga Hile Apk for any questions or feedback.</p>
18
- <h2>farm heroes saga hile apk</h2><br /><p><b><b>Download Zip</b> &#10004;&#10004;&#10004; <a href="https://jinyurl.com/2uNU83">https://jinyurl.com/2uNU83</a></b></p><br /><br /> 401be4b1e0<br />
19
- <br />
20
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download One Piece Bounty Rush APK and Enjoy Pirate Action Offline.md DELETED
@@ -1,125 +0,0 @@
1
-
2
- <h1>Download One Piece APK Offline: How to Play the Popular Anime Game on Your Android Device</h1>
3
- <p>One Piece is one of the most popular anime series in the world, with millions of fans who love the adventures of Monkey D. Luffy and his crew of pirates. If you are one of them, you might be interested in playing a game based on the anime on your Android device. But what if you don't have an internet connection or you want to save your data? Don't worry, there is a solution for you: download One Piece APK offline.</p>
4
- <h2>download one piece apk offline</h2><br /><p><b><b>Download</b> &#9745; <a href="https://jinyurl.com/2uNUfP">https://jinyurl.com/2uNUfP</a></b></p><br /><br />
5
- <h2>What is One Piece APK Offline?</h2>
6
- <p>One Piece APK offline is a modified version of the original One Piece game that allows you to play it without an internet connection. It is a 2D fighting game that features characters from the anime, such as Luffy, Zoro, Nami, Sanji, Usopp, Chopper, Robin, Franky, Brook, and more. You can choose your favorite character and fight against enemies in various stages and scenarios inspired by the anime.</p>
7
- <h3>Features of One Piece APK Offline</h3>
8
- <p>Some of the features that make One Piece APK offline a fun and exciting game are:</p>
9
- <ul>
10
- <li>It has high-quality graphics and sound effects that capture the essence of the anime.</li>
11
- <li>It has simple and intuitive controls that make it easy to play.</li>
12
- <li>It has a variety of characters, each with their own unique skills and abilities.</li>
13
- <li>It has different modes, such as story mode, arcade mode, survival mode, and training mode.</li>
14
- <li>It has a lot of challenges and missions that test your skills and strategy.</li>
15
- <li>It has an online mode that lets you battle with other players around the world.</li>
16
- </ul>
17
- <h3>Requirements for One Piece APK Offline</h3>
18
- <p>To play One Piece APK offline, you need to have an Android device that meets the following requirements:</p>
19
- <ul>
20
- <li>It has Android version 4.0 or higher.</li>
21
- <li>It has at least 1 GB of RAM.</li>
22
- <li>It has at least 500 MB of free storage space.</li>
23
- </ul>
24
- <h2>How to Download and Install One Piece APK Offline</h2>
25
- <p>If you want to download and install One Piece APK offline on your Android device, you need to follow these steps:</p>
26
- <p>download one piece bounty rush apk offline<br />
27
- download one piece burning will apk offline<br />
28
- download one piece fighting path apk offline<br />
29
- download one piece pirate warriors 4 apk offline<br />
30
- download one piece treasure cruise apk offline<br />
31
- download one piece thousand storm apk offline<br />
32
- download one piece world seeker apk offline<br />
33
- download one piece romance dawn apk offline<br />
34
- download one piece unlimited world red apk offline<br />
35
- download one piece grand battle apk offline<br />
36
- download one piece grand adventure apk offline<br />
37
- download one piece grand collection apk offline<br />
38
- download one piece grand line bout apk offline<br />
39
- download one piece great pirate colosseum apk offline<br />
40
- download one piece kaizoku musou apk offline<br />
41
- download one piece king of pirates apk offline<br />
42
- download one piece legend of sea apk offline<br />
43
- download one piece legends of pirates apk offline<br />
44
- download one piece mobile game apk offline<br />
45
- download one piece new world apk offline<br />
46
- download one piece ocean's dream apk offline<br />
47
- download one piece online game apk offline<br />
48
- download one piece pirate warriors 3 apk offline<br />
49
- download one piece power of legends apk offline<br />
50
- download one piece run chopper run apk offline<br />
51
- download one piece super grand battle x apk offline<br />
52
- download one piece the bloodline apk offline<br />
53
- download one piece the will of d apk offline<br />
54
- download one piece ultimate fight apk offline<br />
55
- download one piece unlimited cruise sp apk offline<br />
56
- free download one piece game for android apk offline<br />
57
- how to download one piece game on android apk offline<br />
58
- how to play one piece game on android without internet connection<br />
59
- latest version of one piece game for android free download apk offline<br />
60
- modded version of one piece game for android free download apk offline<br />
61
- new update of one piece game for android free download apk offline<br />
62
- no root required to play one piece game on android free download apk offline<br />
63
- safe and secure way to download one piece game for android free apk offline<br />
64
- tips and tricks to play one piece game on android free download apk offline<br />
65
- unlimited coins and gems in one piece game for android free download apk offline</p>
66
- <h3>Step 1: Find a reliable source for the APK file</h3>
67
- <p>The first thing you need to do is to find a trustworthy website that offers the APK file for One Piece APK offline. You can use a search engine like Google or Bing to look for it, or you can use one of these links:</p>
68
- <ul>
69
- <li>[One piece apk offline APK - Download (Android) - APKCombo](^1^)</li>
70
- <li>[One piece mugen offline android APK - Download (Android) - APKCombo](^2^)</li>
71
- </ul>
72
- <p>Make sure that the website is safe and secure before downloading anything from it. You can check the reviews and ratings of other users, or use antivirus software to scan the file.</p>
73
- <h3>Step 2: Enable unknown sources on your device</h3>
74
- <p>The next thing you need to do is to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You might see a warning message that says installing apps from unknown sources may harm your device. Tap OK to proceed.</p>
75
- <h3>Step 3: Download and install the APK file</h3>
76
- <p>The third thing you need to do is to download and install the APK file for One Piece APK offline. To do this, go to the website where you found the file and tap on the download button. Wait for the file to be downloaded on your device, then open it. You might see a pop-up message that asks you to confirm the installation. Tap Install and wait for the process to finish.</p>
77
- <h3>Step 4: Launch the game and enjoy</h3>
78
- <p>The last thing you need to do is to launch the game and enjoy playing it. To do this, go to your app drawer and tap on the One Piece icon. You might see a loading screen that shows the game's logo and some information. Wait for the game to load, then choose your language and accept the terms and conditions. You can now start playing One Piece APK offline on your Android device.</p>
79
- <h2>Tips and Tricks for Playing One Piece APK Offline</h2>
80
- <p>Now that you have downloaded and installed One Piece APK offline, you might want to know some tips and tricks that can help you play better and have more fun. Here are some of them:</p>
81
- <h3>Choose your favorite character and customize their skills</h3>
82
- <p>One of the best things about One Piece APK offline is that you can choose your favorite character from the anime and customize their skills according to your preference. You can unlock more characters as you progress in the game, and you can also upgrade their skills with coins and items. You can access the character menu by tapping on the character icon on the top left corner of the screen. There, you can see your character's stats, skills, equipment, and costumes. You can also switch characters by tapping on the change button.</p>
83
- <h3>Explore the different modes and challenges</h3>
84
- <p>One Piece APK offline has different modes and challenges that offer different gameplay experiences and rewards. You can access them by tapping on the mode icon on the top right corner of the screen. There, you can see four options: story mode, arcade mode, survival mode, and training mode. Here is a brief description of each mode:</p>
85
- <ul>
86
- <li>Story mode: This is where you follow the main storyline of the anime and fight against various enemies and bosses. You can also unlock new characters and stages as you complete each chapter.</li>
87
- <li>Arcade mode: This is where you fight against random opponents in a series of battles. You can choose your difficulty level and earn coins and items as you win.</li>
88
- <li>Survival mode: This is where you test your endurance and skills by fighting against endless waves of enemies. You can see how long you can last and how many enemies you can defeat.</li>
89
- <li>Training mode: This is where you practice your moves and combos without any pressure or interruption. You can also adjust the settings of your opponent, such as their level, health, defense, and attack.</li>
90
- </ul>
91
- <h3>Collect coins and items to upgrade your equipment</h3>
92
- <p>One Piece APK offline has a lot of coins and items that you can collect by playing the game. You can use them to upgrade your equipment, such as your weapons, armor, accessories, and costumes. You can access the shop menu by tapping on the shop icon on the bottom right corner of the screen. There, you can see four options: weapon shop, armor shop, accessory shop, and costume shop. Here is a brief description of each shop:</p>
93
- <ul>
94
- <li>Weapon shop: This is where you can buy new weapons or upgrade your existing ones. Weapons have different attributes, such as power, speed, range, and special effects.</li>
95
- <li>Armor shop: This is where you can buy new armor or upgrade your existing ones. Armor have different attributes, such as defense, health, resistance, and special effects.</li>
96
- <li>Accessory shop: This is where you can buy new accessories or upgrade your existing ones. Accessories have different attributes, such as attack, critical, combo, and special effects.</li>
97
- <li>Costume shop: This is where you can buy new costumes or change your existing ones. Costumes have different appearances, but they do not affect your stats or skills.</li>
98
- </ul>
99
- <h3>Join online battles and tournaments with other players</h3>
100
- <p>One Piece APK offline has an online mode that lets you battle with other players around the world. You can access it by tapping on the online icon on the bottom left corner of the screen. There, you can see two options: battle mode and tournament mode. Here is a brief description of each mode:</p>
101
- <ul>
102
- <li>Battle mode: This is where you can join or create a room with up to four players and fight against each other in real time. You can choose your character, stage, and rules before the battle. You can also chat with other players and see their profiles and rankings.</li>
103
- <li>Tournament mode: This is where you can join or create a tournament with up to 16 players and compete for the championship. You can choose your character, stage, and rules before the tournament. You can also chat with other players and see their profiles and rankings.</li>
104
- </ul>
105
- <h2>Conclusion</h2>
106
- <p>One Piece APK offline is a great game for fans of the anime who want to play it on their Android devices without an internet connection. It has a lot of features, modes, challenges, and characters that make it fun and exciting. It also has an online mode that lets you battle with other players around the world. If you want to download and install One Piece APK offline, you can follow the steps in this article and enjoy playing it.</p>
107
- <h2>FAQs</h2>
108
- <p>Here are some frequently asked questions about One Piece APK offline:</p>
109
- <h3>Is One Piece APK offline safe to download and install?</h3>
110
- <p>One Piece APK offline is safe to download and install as long as you get it from a reliable source. You should always check the reviews and ratings of other users, or use antivirus software to scan the file before installing it. You should also enable unknown sources on your device only when you need to install the APK file, and disable it afterwards.</p>
111
- <h3>Is One Piece APK offline free to play?</h3>
112
- <p>One Piece APK offline is free to play, but it may contain some in-app purchases that require real money. You can buy coins and items to upgrade your equipment, or unlock new characters and stages. However, you can also earn coins and items by playing the game, so you don't have to spend any money if you don't want to.</p>
113
- <h3>How can I update One Piece APK offline?</h3>
114
- <p>One Piece APK offline may not update automatically like the original One Piece game from the Google Play Store. You may need to download and install the latest version of the APK file from the same source where you got it before. You should also backup your data before updating, in case something goes wrong.</p>
115
- <h3>How can I contact the developer of One Piece APK offline?</h3>
116
- <p>One Piece APK offline is not an official game from the original developer of One Piece, which is Bandai Namco Entertainment. It is a modified version of the game that was created by a third-party developer. Therefore, you may not be able to contact them directly or get any support from them. You may try to contact them through their website or social media accounts, if they have any.</p>
117
- <h3>What are some alternatives to One Piece APK offline?</h3>
118
- <p>If you are looking for some alternatives to One Piece APK offline, you may want to try these games:</p>
119
- <ul>
120
- <li>[One Piece Bounty Rush]: This is an official game from Bandai Namco Entertainment that lets you play as one of the characters from the anime and join 4 vs 4 real-time battles with other players.</li>
121
- <li>[One Piece Treasure Cruise]: This is another official game from Bandai Namco Entertainment that lets you play as one of the characters from the anime and relive their stories in a turn-based RPG.</li>
122
- <li>[One Piece Fighting Path]: This is a new game from Nuverse that lets you play as one of the characters from the anime and fight against enemies in a 3D action RPG.</li>
123
- </ul></p> 401be4b1e0<br />
124
- <br />
125
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py DELETED
@@ -1,174 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import inspect
17
- from typing import List, Optional, Tuple, Union
18
-
19
- import numpy as np
20
- import paddle
21
- import PIL
22
-
23
- from ...models import UNet2DModel, VQModel
24
- from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
25
- from ...schedulers import (
26
- DDIMScheduler,
27
- DPMSolverMultistepScheduler,
28
- EulerAncestralDiscreteScheduler,
29
- EulerDiscreteScheduler,
30
- LMSDiscreteScheduler,
31
- PNDMScheduler,
32
- )
33
- from ...utils import PIL_INTERPOLATION
34
-
35
-
36
- def preprocess(image):
37
- w, h = image.size
38
- w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
39
- image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
40
- image = np.array(image).astype(np.float32) / 255.0
41
- image = image[None].transpose(0, 3, 1, 2)
42
- image = paddle.to_tensor(image)
43
- return 2.0 * image - 1.0
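For reference, a small worked example of what the preprocess helper above does; the 300x200 input size is hypothetical and only PIL/numpy are assumed to be available alongside paddle.

import numpy as np
import PIL.Image

# A 300x200 RGB image: width and height are floored to multiples of 32 -> 288x192.
img = PIL.Image.fromarray(np.zeros((200, 300, 3), dtype=np.uint8))
tensor = preprocess(img)
# `tensor` now has shape [1, 3, 192, 288] (NCHW) with values in [-1.0, 1.0];
# an all-black input maps to -1.0 everywhere because of the final `2.0 * image - 1.0`.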
44
-
45
-
46
- class LDMSuperResolutionPipeline(DiffusionPipeline):
47
- r"""
48
- A pipeline for image super-resolution using latent diffusion.
49
-
50
- This class inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
51
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
52
-
53
- Parameters:
54
- vqvae ([`VQModel`]):
55
- Vector-quantized (VQ) VAE Model to encode and decode images to and from latent representations.
56
- unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
57
- scheduler ([`SchedulerMixin`]):
58
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
59
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`EulerDiscreteScheduler`],
60
- [`EulerAncestralDiscreteScheduler`], [`DPMSolverMultistepScheduler`], or [`PNDMScheduler`].
61
- """
62
-
63
- def __init__(
64
- self,
65
- vqvae: VQModel,
66
- unet: UNet2DModel,
67
- scheduler: Union[
68
- DDIMScheduler,
69
- PNDMScheduler,
70
- LMSDiscreteScheduler,
71
- EulerDiscreteScheduler,
72
- EulerAncestralDiscreteScheduler,
73
- DPMSolverMultistepScheduler,
74
- ],
75
- ):
76
- super().__init__()
77
- self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
78
-
79
- @paddle.no_grad()
80
- def __call__(
81
- self,
82
- image: Union[paddle.Tensor, PIL.Image.Image],
83
- batch_size: Optional[int] = 1,
84
- num_inference_steps: Optional[int] = 100,
85
- eta: Optional[float] = 0.0,
86
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
87
- output_type: Optional[str] = "pil",
88
- return_dict: bool = True,
89
- **kwargs,
90
- ) -> Union[Tuple, ImagePipelineOutput]:
91
- r"""
92
- Args:
93
- image (`paddle.Tensor` or `PIL.Image.Image`):
94
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
95
- process.
96
- batch_size (`int`, *optional*, defaults to 1):
97
- Number of images to generate.
98
- num_inference_steps (`int`, *optional*, defaults to 100):
99
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
100
- expense of slower inference.
101
- eta (`float`, *optional*, defaults to 0.0):
102
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
103
- [`schedulers.DDIMScheduler`], will be ignored for others.
104
- generator (`paddle.Generator`, *optional*):
105
- One or a list of paddle generator(s) to make generation deterministic.
106
- output_type (`str`, *optional*, defaults to `"pil"`):
107
- The output format of the generate image. Choose between
108
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
109
- return_dict (`bool`, *optional*):
110
- Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
111
-
112
- Returns:
113
- [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
114
- `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
115
- generated images.
116
- """
117
-
118
- if isinstance(image, PIL.Image.Image):
119
- batch_size = 1
120
- elif isinstance(image, paddle.Tensor):
121
- batch_size = image.shape[0]
122
- else:
123
- raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `paddle.Tensor` but is {type(image)}")
124
-
125
- if isinstance(image, PIL.Image.Image):
126
- image = preprocess(image)
127
-
128
- height, width = image.shape[-2:]
129
-
130
- # in_channels should be 6: 3 for latents, 3 for low resolution image
131
- latents_shape = (batch_size, self.unet.in_channels // 2, height, width)
132
- latents_dtype = next(self.unet.named_parameters())[1].dtype
133
-
134
- latents = paddle.randn(latents_shape, generator=generator, dtype=latents_dtype)
135
-
136
- image = image.cast(latents_dtype)
137
-
138
- self.scheduler.set_timesteps(num_inference_steps)
139
- timesteps_tensor = self.scheduler.timesteps
140
-
141
- # scale the initial noise by the standard deviation required by the scheduler
142
- latents = latents * self.scheduler.init_noise_sigma
143
-
144
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
145
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
146
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
147
- # and should be between [0, 1]
148
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
149
- extra_kwargs = {}
150
- if accepts_eta:
151
- extra_kwargs["eta"] = eta
152
-
153
- for t in self.progress_bar(timesteps_tensor):
154
- # concat latents and low resolution image in the channel dimension.
155
- latents_input = paddle.concat([latents, image], axis=1)
156
- latents_input = self.scheduler.scale_model_input(latents_input, t)
157
- # predict the noise residual
158
- noise_pred = self.unet(latents_input, t).sample
159
- # compute the previous noisy sample x_t -> x_t-1
160
- latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
161
-
162
- # decode the image latents with the VQVAE
163
- image = self.vqvae.decode(latents).sample
164
- image = paddle.clip(image, -1.0, 1.0)
165
- image = image / 2 + 0.5
166
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
167
-
168
- if output_type == "pil":
169
- image = self.numpy_to_pil(image)
170
-
171
- if not return_dict:
172
- return (image,)
173
-
174
- return ImagePipelineOutput(images=image)
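A minimal usage sketch for the pipeline above; it assumes ppdiffusers is installed and that the "CompVis/ldm-super-resolution-4x-openimages" checkpoint id resolves for from_pretrained, and the file names are placeholders.

import PIL.Image
from ppdiffusers import LDMSuperResolutionPipeline

# load the super-resolution pipeline (VQ-VAE, UNet and scheduler)
pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = PIL.Image.open("low_res.png").convert("RGB")  # any small RGB image
upscaled = pipeline(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")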
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/gradio_demo.py DELETED
@@ -1,113 +0,0 @@
1
- import torch, uuid
2
- from time import gmtime, strftime
3
- import os, sys, shutil
4
- from src.utils.preprocess import CropAndExtract
5
- from src.test_audio2coeff import Audio2Coeff
6
- from src.facerender.animate import AnimateFromCoeff
7
- from src.generate_batch import get_data
8
- from src.generate_facerender_batch import get_facerender_data
9
- from src.utils.text2speech import text2speech
10
-
11
- from pydub import AudioSegment
12
-
13
- def mp3_to_wav(mp3_filename,wav_filename,frame_rate):
14
- mp3_file = AudioSegment.from_file(file=mp3_filename)
15
- mp3_file.set_frame_rate(frame_rate).export(wav_filename,format="wav")
16
-
17
-
18
- class SadTalker():
19
-
20
- def __init__(self, checkpoint_path='checkpoints', config_path='src/config'):
21
-
22
- if torch.cuda.is_available() :
23
- device = "cuda"
24
- else:
25
- device = "cpu"
26
-
27
- os.environ['TORCH_HOME']= checkpoint_path
28
-
29
- path_of_lm_croper = os.path.join( checkpoint_path, 'shape_predictor_68_face_landmarks.dat')
30
- path_of_net_recon_model = os.path.join( checkpoint_path, 'epoch_20.pth')
31
- dir_of_BFM_fitting = os.path.join( checkpoint_path, 'BFM_Fitting')
32
- wav2lip_checkpoint = os.path.join( checkpoint_path, 'wav2lip.pth')
33
-
34
- audio2pose_checkpoint = os.path.join( checkpoint_path, 'auido2pose_00140-model.pth')
35
- audio2pose_yaml_path = os.path.join( config_path, 'auido2pose.yaml')
36
-
37
- audio2exp_checkpoint = os.path.join( checkpoint_path, 'auido2exp_00300-model.pth')
38
- audio2exp_yaml_path = os.path.join( config_path, 'auido2exp.yaml')
39
-
40
- free_view_checkpoint = os.path.join( checkpoint_path, 'facevid2vid_00189-model.pth.tar')
41
- mapping_checkpoint = os.path.join( checkpoint_path, 'mapping_00229-model.pth.tar')
42
- facerender_yaml_path = os.path.join( config_path, 'facerender.yaml')
43
-
44
- #init model
45
- print(path_of_lm_croper)
46
- self.preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device)
47
-
48
- print(audio2pose_checkpoint)
49
- self.audio_to_coeff = Audio2Coeff(audio2pose_checkpoint, audio2pose_yaml_path,
50
- audio2exp_checkpoint, audio2exp_yaml_path, wav2lip_checkpoint, device)
51
- print(free_view_checkpoint)
52
- self.animate_from_coeff = AnimateFromCoeff(free_view_checkpoint, mapping_checkpoint,
53
- facerender_yaml_path, device)
54
- self.device = device
55
-
56
- def test(self, source_image, driven_audio, still_mode, use_enhancer, result_dir='./'):
57
-
58
- time_tag = str(uuid.uuid4())
59
- save_dir = os.path.join(result_dir, time_tag)
60
- os.makedirs(save_dir, exist_ok=True)
61
-
62
- input_dir = os.path.join(save_dir, 'input')
63
- os.makedirs(input_dir, exist_ok=True)
64
-
65
- print(source_image)
66
- pic_path = os.path.join(input_dir, os.path.basename(source_image))
67
- shutil.move(source_image, input_dir)
68
-
69
- if os.path.isfile(driven_audio):
70
- audio_path = os.path.join(input_dir, os.path.basename(driven_audio))
71
-
72
- #### mp3 to wav
73
- if '.mp3' in audio_path:
74
- mp3_to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000)
75
- audio_path = audio_path.replace('.mp3', '.wav')
76
- else:
77
- shutil.move(driven_audio, input_dir)
78
- else:
79
- text2speech  # NOTE: bare reference is a no-op; the text-to-speech fallback is not implemented, so audio_path stays undefined on this branch
80
-
81
-
82
- os.makedirs(save_dir, exist_ok=True)
83
- pose_style = 0
84
- #crop image and extract 3dmm from image
85
- first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
86
- os.makedirs(first_frame_dir, exist_ok=True)
87
- first_coeff_path, crop_pic_path, original_size = self.preprocess_model.generate(pic_path, first_frame_dir)
88
-
89
- if first_coeff_path is None:
90
- raise AttributeError("No face is detected")
91
-
92
- #audio2coeff
93
- batch = get_data(first_coeff_path, audio_path, self.device) # longer audio?
94
- coeff_path = self.audio_to_coeff.generate(batch, save_dir, pose_style)
95
- #coeff2video
96
- batch_size = 4
97
- data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode)
98
- self.animate_from_coeff.generate(data, save_dir, enhancer='gfpgan' if use_enhancer else None, original_size=original_size)
99
- video_name = data['video_name']
100
- print(f'The generated video is named {video_name} in {save_dir}')
101
-
102
- torch.cuda.empty_cache()
103
- if torch.cuda.is_available(): torch.cuda.synchronize()  # synchronize() raises on CPU-only machines
104
- import gc; gc.collect()
105
-
106
- if use_enhancer:
107
- return os.path.join(save_dir, video_name+'_enhanced.mp4'), os.path.join(save_dir, video_name+'_enhanced.mp4')
108
-
109
- else:
110
- return os.path.join(save_dir, video_name+'.mp4'), os.path.join(save_dir, video_name+'.mp4')
111
-
112
-
113
-
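A hypothetical invocation sketch for the wrapper above; the file paths are placeholders and the keyword arguments mirror the test signature defined in the class. Note that test moves the input files into the per-run result directory.

from src.gradio_demo import SadTalker

talker = SadTalker(checkpoint_path='checkpoints', config_path='src/config')
video_path, _ = talker.test(
    source_image='examples/face.png',    # portrait image to animate
    driven_audio='examples/speech.wav',  # driving audio clip (wav or mp3)
    still_mode=True,                     # keep the head pose mostly fixed
    use_enhancer=False,                  # skip GFPGAN enhancement
    result_dir='./results',
)
print(video_path)                        # path of the generated talking-head video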
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/__init__.py DELETED
File without changes
spaces/ATang0729/Forecast4Muses/Model/Model6/model6_inference.py DELETED
@@ -1,270 +0,0 @@
1
- """old name: test_runtime_model6.py"""
2
-
3
- import json
4
- import os
5
- import subprocess
6
- import sys
7
- import warnings
8
- from time import time
9
- from typing import Union, Tuple, Any
10
-
11
- import pandas as pd
12
- from mmdet.apis import inference_detector
13
- from mmdet.apis import init_detector as det_init_detector
14
- from mmpose.apis import inference_topdown
15
- from mmpose.apis import init_model as pose_init_model
16
- from mmpretrain import ImageClassificationInferencer
17
- from mmpretrain.utils import register_all_modules
18
- from .extensions.vis_pred_save import save_result
19
-
20
- register_all_modules()
21
-
22
- st = ist = time()
23
- # irt = time() - st
24
- # print(f'==Packages importing time is {irt}s==\n')
25
-
26
- print('==Start==')
27
-
28
- # DEVICE = 'cuda:0,1,2,3'
29
- DEVICE = 'cpu'
30
- abs_path = os.path.dirname(os.path.abspath(__file__))
31
- yolo_config = os.path.join(abs_path, 'Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov6_s_fast.py')
32
- yolo_checkpoint = os.path.join(abs_path, 'Model6_0_ClothesDetection/mmyolo/work_dirs/yolov6_s_df2_0.4/epoch_64.pth')
33
- pretrain_config = os.path.join(abs_path, 'Model6_2_ProfileRecogition/mmpretrain/configs/resnext101_4xb32_2048e_3c_noF.py')
34
- pretrain_checkpoint = os.path.join(abs_path, 'Model6_2_ProfileRecogition/mmpretrain/work_dirs/'
35
- 'resnext101_4xb32_2048e_3c_noF/best_accuracy_top1_epoch_1520.pth')
36
- pose_configs = {
37
- 'short_sleeved_shirt': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192.py',
38
- 'long_sleeved_shirt': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192.py',
39
- 'short_sleeved_outwear': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192.py',
40
- 'long_sleeved_outwear': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb16-120e_deepfashion2_long_sleeved_outwear_256x192.py',
41
- 'vest': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192.py',
42
- 'sling': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192.py',
43
- 'shorts': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192.py',
44
- 'trousers': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-60e_deepfashion2_trousers_256x192.py',
45
- 'skirt': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192.py',
46
- 'short_sleeved_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-150e_deepfashion2_short_sleeved_dress_256x192.py',
47
- 'long_sleeved_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192.py',
48
- 'vest_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192.py',
49
- 'sling_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192.py',
50
- }
51
-
52
- pose_checkpoints = {
53
- 'short_sleeved_shirt': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192/best_PCK_epoch_50.pth',
54
- 'long_sleeved_shirt': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192/best_PCK_epoch_60.pth',
55
- 'short_sleeved_outwear': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192/best_PCK_epoch_120.pth',
56
- 'long_sleeved_outwear': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb16-120e_deepfashion2_long_sleeved_outwear_256x192/best_PCK_epoch_100.pth',
57
- 'vest': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192/best_PCK_epoch_90.pth',
58
- 'sling': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192/best_PCK_epoch_60.pth',
59
- 'shorts': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192/best_PCK_epoch_160.pth',
60
- 'trousers': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-60e_deepfashion2_trousers_256x192/best_PCK_epoch_30.pth',
61
- 'skirt': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192/best_PCK_epoch_110.pth',
62
- 'short_sleeved_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_short_sleeved_dress_256x192/best_PCK_epoch_100.pth',
63
- 'long_sleeved_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192/best_PCK_epoch_120.pth',
64
- 'vest_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192/best_PCK_epoch_80.pth',
65
- 'sling_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192/best_PCK_epoch_140.pth',
66
- }
67
-
68
- start_load = time()
69
- yolo_inferencer = det_init_detector(yolo_config, yolo_checkpoint, device=DEVICE)
70
- print('=' * 2 + 'The model loading time of MMYolo is {}s'.format(time() - start_load) + '=' * 2)
71
-
72
- start_load = time()
73
- pretrain_inferencer = ImageClassificationInferencer(model=pretrain_config,
74
- pretrained=pretrain_checkpoint,
75
- device=DEVICE)
76
- print('=' * 2 + 'The model loading time of MMPretrain is {}s'.format(time() - start_load) + '=' * 2)
77
-
78
-
79
- def get_bbox_results_by_classes(result) -> dict:
80
- """
81
- :param result: the result of mmyolo inference
82
- :return: a dict of bbox results by classes
83
- """
84
- bbox_results_by_classes = {
85
- 'short_sleeved_shirt': [],
86
- 'long_sleeved_shirt': [],
87
- 'short_sleeved_outwear': [],
88
- 'long_sleeved_outwear': [],
89
- 'vest': [],
90
- 'sling': [],
91
- 'shorts': [],
92
- 'trousers': [],
93
- 'skirt': [],
94
- 'short_sleeved_dress': [],
95
- 'long_sleeved_dress': [],
96
- 'vest_dress': [],
97
- 'sling_dress': [],
98
- }
99
- pred_instances = result.pred_instances
100
- _bboxes = pred_instances.bboxes
101
- _labels = pred_instances.labels
102
- _scores = pred_instances.scores
103
- labels = _labels[_scores > 0.3]
104
- bboxes = _bboxes[_scores > 0.3]
105
- # use enumerate to get index and value
106
- for idx, value in enumerate(labels):
107
- class_name = list(bbox_results_by_classes.keys())[value]
108
- x1 = bboxes[idx][0]
109
- y1 = bboxes[idx][1]
110
- x2 = bboxes[idx][2]
111
- y2 = bboxes[idx][3]
112
- bbox_results_by_classes[class_name].append([x1, y1, x2, y2])
113
- return bbox_results_by_classes
114
-
115
-
116
- def mmyolo_inference(img: Union[str, list], model) -> tuple:
117
- mmyolo_st = time()
118
- result = inference_detector(model, img)
119
- mmyolo_et = time()
120
-
121
- return result, (mmyolo_et - mmyolo_st)
122
-
123
-
124
- def mmpose_inference(person_results: dict, use_bbox: bool,
125
- mmyolo_cfg_path: str, mmyolo_ckf_path: str,
126
- img: str, output_path_root: str, save=True, device='cpu') -> float:
127
- """
128
- :param person_results: the result of mmyolo inference
129
- :param use_bbox: whether to use bbox to inference the pose results
130
- :param mmyolo_cfg_path: the file path of mmyolo config
131
- :param mmyolo_ckf_path: the file path of mmyolo checkpoint
132
- :param img: the path of the image to inference
133
- :param output_path_root: the root path of the output
134
- :param save: whether to save the inference result, including the image and the predicted json file.
135
- If `save` is False, `output_path_root` will be invalid.
136
- :param device: the device to inference
137
- """
138
- mmpose_st = time()
139
- poses = {
140
- 'short_sleeved_shirt': {},
141
- 'long_sleeved_shirt': {},
142
- 'short_sleeved_outwear': {},
143
- 'long_sleeved_outwear': {},
144
- 'vest': {},
145
- 'sling': {},
146
- 'shorts': {},
147
- 'trousers': {},
148
- 'skirt': {},
149
- 'short_sleeved_dress': {},
150
- 'long_sleeved_dress': {},
151
- 'vest_dress': {},
152
- 'sling_dress': {}
153
- }
154
- for label, person_result in person_results.items():
155
- if len(person_result) == 0:
156
- continue
157
- pose_config = pose_configs[label]
158
- pose_checkpoint = pose_checkpoints[label]
159
- if not use_bbox:
160
- from mmpose.apis import MMPoseInferencer
161
-
162
- warnings.warn('use_bbox is False, '
163
- 'which means MMPoseInferencer will infer the pose results without detection bboxes '
164
- 'and the results may be wrong')
165
- inferencer = MMPoseInferencer(
166
- pose2d=pose_config,
167
- pose2d_weights=pose_checkpoint,
168
- det_model=mmyolo_cfg_path,
169
- det_weights=mmyolo_ckf_path
170
- )
171
- result_generator = inferencer(img, out_dir='upload_to_web_tmp', return_vis=True)
172
- result = next(result_generator)
173
- # print(result)
174
- else:
175
- pose_model = pose_init_model(
176
- pose_config,
177
- pose_checkpoint,
178
- device=device
179
- )
180
- pose_results = inference_topdown(pose_model, img, person_result, bbox_format='xyxy')
181
- poses[label]['pose_results'] = pose_results
182
- poses[label]['pose_model'] = pose_model
183
- mmpose_et = time()
184
- if save:
185
-
186
- save_result(img, poses, out_dir=output_path_root)
187
-
188
- return mmpose_et - mmpose_st
189
-
190
-
191
- def mmpretrain_inference(img: Union[str, list], model) -> tuple:
192
- mmpretain_st = time()
193
- cls_result = model(img)
194
- mmpretain_et = time()
195
- return cls_result, (mmpretain_et - mmpretain_st)
196
-
197
-
198
- def main(img_path: str, output_path_root='upload_to_web_tmp', use_bbox=True, device='cpu', test_runtime=False) -> dict:
199
- """
200
- :param img_path: the path of the image or the folder of images
201
- :param output_path_root: the root path of the output
202
- :param use_bbox: whether to use bbox to inference the pose results
203
- :param device: the device to inference
204
- :param test_runtime: whether to test the runtime
205
-
206
- :return: the results of model6_2 in form of dictionary
207
- """
208
- if os.path.isdir(img_path):
209
- img_names = os.listdir(img_path)
210
- img_paths = [os.path.join(img_path, img_name) for img_name in img_names]
211
- elif os.path.isfile(img_path):
212
- img_paths = [img_path]
213
- else:
214
- print('==Img_path must be a path of an image or a folder!==')
215
- raise ValueError()
216
-
217
- runtimes = [['img_name',
218
- 'runtime_mmyolo', 'percent1',
219
- 'runtime_mmpose', 'percent2',
220
- 'runtime_mmpretrain', 'percent3',
221
- 'runtime_total']]
222
-
223
- cls_results = {}
224
-
225
- for img in img_paths:
226
- print(f'==Start to inference {img}==')
227
- yolo_result, runtime_mmyolo = mmyolo_inference(img, yolo_inferencer)
228
- print(f'==mmyolo running time is {runtime_mmyolo}s==')
229
-
230
- person_results = get_bbox_results_by_classes(yolo_result)
231
-
232
- runtime_mmpose = mmpose_inference(
233
- person_results=person_results,
234
- use_bbox=use_bbox,
235
- mmyolo_cfg_path=yolo_config,
236
- mmyolo_ckf_path=yolo_checkpoint,
237
- img=img,
238
- output_path_root=output_path_root,
239
- save=True,
240
- device=device
241
- )
242
- print(f'mmpose running time is {runtime_mmpose}s')
243
-
244
- cls_result, runtime_mmpretrain = mmpretrain_inference(img, pretrain_inferencer)
245
- print(f'mmpretrain running time is {runtime_mmpretrain}s')
246
- cls_results[os.path.basename(img)] = cls_result
247
- if test_runtime:
248
- runtime_total = runtime_mmyolo + runtime_mmpose + runtime_mmpretrain
249
- percent1 = str(round(runtime_mmyolo / runtime_total * 100, 2)) + '%'
250
- percent2 = str(round(runtime_mmpose / runtime_total * 100, 2)) + '%'
251
- percent3 = str(round(runtime_mmpretrain / runtime_total * 100, 2)) + '%'
252
- img_name = os.path.basename(img)
253
- runtimes.append([img_name,
254
- runtime_mmyolo, percent1,
255
- runtime_mmpose, percent2,
256
- runtime_mmpretrain, percent3,
257
- runtime_total])
258
- if test_runtime:
259
- df = pd.DataFrame(runtimes, columns=runtimes[0])
260
- df.to_csv('runtimes.csv', index=False)
261
-
262
- return cls_results
263
-
264
-
265
- if __name__ == "__main__":
266
- # main(1)
267
- main('data-test/')
268
- # main('data-test/000002.jpg')
269
- rt = time() - st
270
- print(f'==Total time cost is {rt}s==')
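A hypothetical call into the entry point above, assuming the package layout implied by the imports (Model.Model6.model6_inference); the paths are placeholders.

from Model.Model6.model6_inference import main

cls_results = main(
    img_path='data-test/',                # a folder of images or a single image path
    output_path_root='upload_to_web_tmp', # where visualizations and predictions are saved
    use_bbox=True,                        # feed the YOLO boxes into the top-down pose models
    device='cpu',
    test_runtime=True,                    # also writes runtimes.csv
)
for img_name, result in cls_results.items():
    print(img_name, result)               # classification output per image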
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Albertha/qwe123/Dockerfile DELETED
@@ -1,13 +0,0 @@
1
- FROM node:slim
2
-
3
- WORKDIR /app
4
-
5
- COPY . .
6
-
7
- EXPOSE 7860
8
-
9
- RUN apt-get update && \
10
- chmod 775 server index.js package.json start.sh /app &&\
11
- npm install  # npm reads package.json directly; there is no pip-style "-r" flag
12
-
13
- CMD ["node", "index.js"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alpaca233/LangchainPDF/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: LangchainPDF
3
- emoji: 🏆
4
- colorFrom: green
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.27.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_unet_2d_condition.py DELETED
@@ -1,1107 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import copy
17
- import gc
18
- import os
19
- import tempfile
20
- import unittest
21
-
22
- import torch
23
- from parameterized import parameterized
24
- from pytest import mark
25
-
26
- from diffusers import UNet2DConditionModel
27
- from diffusers.models.attention_processor import CustomDiffusionAttnProcessor, LoRAAttnProcessor
28
- from diffusers.utils import (
29
- floats_tensor,
30
- load_hf_numpy,
31
- logging,
32
- require_torch_gpu,
33
- slow,
34
- torch_all_close,
35
- torch_device,
36
- )
37
- from diffusers.utils.import_utils import is_xformers_available
38
- from diffusers.utils.testing_utils import enable_full_determinism
39
-
40
- from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
41
-
42
-
43
- logger = logging.get_logger(__name__)
44
-
45
- enable_full_determinism()
46
-
47
-
48
- def create_lora_layers(model, mock_weights: bool = True):
49
- lora_attn_procs = {}
50
- for name in model.attn_processors.keys():
51
- cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim
52
- if name.startswith("mid_block"):
53
- hidden_size = model.config.block_out_channels[-1]
54
- elif name.startswith("up_blocks"):
55
- block_id = int(name[len("up_blocks.")])
56
- hidden_size = list(reversed(model.config.block_out_channels))[block_id]
57
- elif name.startswith("down_blocks"):
58
- block_id = int(name[len("down_blocks.")])
59
- hidden_size = model.config.block_out_channels[block_id]
60
-
61
- lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
62
- lora_attn_procs[name] = lora_attn_procs[name].to(model.device)
63
-
64
- if mock_weights:
65
- # add 1 to weights to mock trained weights
66
- with torch.no_grad():
67
- lora_attn_procs[name].to_q_lora.up.weight += 1
68
- lora_attn_procs[name].to_k_lora.up.weight += 1
69
- lora_attn_procs[name].to_v_lora.up.weight += 1
70
- lora_attn_procs[name].to_out_lora.up.weight += 1
71
-
72
- return lora_attn_procs
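A short sketch, not part of the test suite, of how the mocked LoRA processors built above could be wired into a small UNet2DConditionModel; the config mirrors the dummy test config and everything else is an assumption.

import torch
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel(
    block_out_channels=(32, 64),
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    cross_attention_dim=32, attention_head_dim=8,
    in_channels=4, out_channels=4, layers_per_block=2, sample_size=32,
)
lora_attn_procs = create_lora_layers(unet)
unet.set_attn_processor(lora_attn_procs)  # swap every attention processor for its LoRA variant
out = unet(
    torch.randn(1, 4, 32, 32),            # noisy latents
    timestep=torch.tensor([10]),
    encoder_hidden_states=torch.randn(1, 4, 32),
).sample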
73
-
74
-
75
- def create_custom_diffusion_layers(model, mock_weights: bool = True):
76
- train_kv = True
77
- train_q_out = True
78
- custom_diffusion_attn_procs = {}
79
-
80
- st = model.state_dict()
81
- for name, _ in model.attn_processors.items():
82
- cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim
83
- if name.startswith("mid_block"):
84
- hidden_size = model.config.block_out_channels[-1]
85
- elif name.startswith("up_blocks"):
86
- block_id = int(name[len("up_blocks.")])
87
- hidden_size = list(reversed(model.config.block_out_channels))[block_id]
88
- elif name.startswith("down_blocks"):
89
- block_id = int(name[len("down_blocks.")])
90
- hidden_size = model.config.block_out_channels[block_id]
91
- layer_name = name.split(".processor")[0]
92
- weights = {
93
- "to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"],
94
- "to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"],
95
- }
96
- if train_q_out:
97
- weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"]
98
- weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"]
99
- weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"]
100
- if cross_attention_dim is not None:
101
- custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor(
102
- train_kv=train_kv,
103
- train_q_out=train_q_out,
104
- hidden_size=hidden_size,
105
- cross_attention_dim=cross_attention_dim,
106
- ).to(model.device)
107
- custom_diffusion_attn_procs[name].load_state_dict(weights)
108
- if mock_weights:
109
- # add 1 to weights to mock trained weights
110
- with torch.no_grad():
111
- custom_diffusion_attn_procs[name].to_k_custom_diffusion.weight += 1
112
- custom_diffusion_attn_procs[name].to_v_custom_diffusion.weight += 1
113
- else:
114
- custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor(
115
- train_kv=False,
116
- train_q_out=False,
117
- hidden_size=hidden_size,
118
- cross_attention_dim=cross_attention_dim,
119
- )
120
- del st
121
- return custom_diffusion_attn_procs
122
-
123
-
124
- class UNet2DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
125
- model_class = UNet2DConditionModel
126
- main_input_name = "sample"
127
-
128
- @property
129
- def dummy_input(self):
130
- batch_size = 4
131
- num_channels = 4
132
- sizes = (32, 32)
133
-
134
- noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
135
- time_step = torch.tensor([10]).to(torch_device)
136
- encoder_hidden_states = floats_tensor((batch_size, 4, 32)).to(torch_device)
137
-
138
- return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states}
139
-
140
- @property
141
- def input_shape(self):
142
- return (4, 32, 32)
143
-
144
- @property
145
- def output_shape(self):
146
- return (4, 32, 32)
147
-
148
- def prepare_init_args_and_inputs_for_common(self):
149
- init_dict = {
150
- "block_out_channels": (32, 64),
151
- "down_block_types": ("CrossAttnDownBlock2D", "DownBlock2D"),
152
- "up_block_types": ("UpBlock2D", "CrossAttnUpBlock2D"),
153
- "cross_attention_dim": 32,
154
- "attention_head_dim": 8,
155
- "out_channels": 4,
156
- "in_channels": 4,
157
- "layers_per_block": 2,
158
- "sample_size": 32,
159
- }
160
- inputs_dict = self.dummy_input
161
- return init_dict, inputs_dict
162
-
163
- @unittest.skipIf(
164
- torch_device != "cuda" or not is_xformers_available(),
165
- reason="XFormers attention is only available with CUDA and `xformers` installed",
166
- )
167
- def test_xformers_enable_works(self):
168
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
169
- model = self.model_class(**init_dict)
170
-
171
- model.enable_xformers_memory_efficient_attention()
172
-
173
- assert (
174
- model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__
175
- == "XFormersAttnProcessor"
176
- ), "xformers is not enabled"
177
-
178
- @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
179
- def test_gradient_checkpointing(self):
180
- # enable deterministic behavior for gradient checkpointing
181
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
182
- model = self.model_class(**init_dict)
183
- model.to(torch_device)
184
-
185
- assert not model.is_gradient_checkpointing and model.training
186
-
187
- out = model(**inputs_dict).sample
188
- # run the backwards pass on the model. For backwards pass, for simplicity purpose,
189
- # we won't calculate the loss and rather backprop on out.sum()
190
- model.zero_grad()
191
-
192
- labels = torch.randn_like(out)
193
- loss = (out - labels).mean()
194
- loss.backward()
195
-
196
- # re-instantiate the model now enabling gradient checkpointing
197
- model_2 = self.model_class(**init_dict)
198
- # clone model
199
- model_2.load_state_dict(model.state_dict())
200
- model_2.to(torch_device)
201
- model_2.enable_gradient_checkpointing()
202
-
203
- assert model_2.is_gradient_checkpointing and model_2.training
204
-
205
- out_2 = model_2(**inputs_dict).sample
206
- # run the backwards pass on the model. For backwards pass, for simplicity purpose,
207
- # we won't calculate the loss and rather backprop on out.sum()
208
- model_2.zero_grad()
209
- loss_2 = (out_2 - labels).mean()
210
- loss_2.backward()
211
-
212
- # compare the output and parameters gradients
213
- self.assertTrue((loss - loss_2).abs() < 1e-5)
214
- named_params = dict(model.named_parameters())
215
- named_params_2 = dict(model_2.named_parameters())
216
- for name, param in named_params.items():
217
- self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
218
-
219
- def test_model_with_attention_head_dim_tuple(self):
220
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
221
-
222
- init_dict["attention_head_dim"] = (8, 16)
223
-
224
- model = self.model_class(**init_dict)
225
- model.to(torch_device)
226
- model.eval()
227
-
228
- with torch.no_grad():
229
- output = model(**inputs_dict)
230
-
231
- if isinstance(output, dict):
232
- output = output.sample
233
-
234
- self.assertIsNotNone(output)
235
- expected_shape = inputs_dict["sample"].shape
236
- self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
237
-
238
- def test_model_with_use_linear_projection(self):
239
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
240
-
241
- init_dict["use_linear_projection"] = True
242
-
243
- model = self.model_class(**init_dict)
244
- model.to(torch_device)
245
- model.eval()
246
-
247
- with torch.no_grad():
248
- output = model(**inputs_dict)
249
-
250
- if isinstance(output, dict):
251
- output = output.sample
252
-
253
- self.assertIsNotNone(output)
254
- expected_shape = inputs_dict["sample"].shape
255
- self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
256
-
257
- def test_model_with_cross_attention_dim_tuple(self):
258
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
259
-
260
- init_dict["cross_attention_dim"] = (32, 32)
261
-
262
- model = self.model_class(**init_dict)
263
- model.to(torch_device)
264
- model.eval()
265
-
266
- with torch.no_grad():
267
- output = model(**inputs_dict)
268
-
269
- if isinstance(output, dict):
270
- output = output.sample
271
-
272
- self.assertIsNotNone(output)
273
- expected_shape = inputs_dict["sample"].shape
274
- self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
275
-
276
- def test_model_with_simple_projection(self):
277
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
278
-
279
- batch_size, _, _, sample_size = inputs_dict["sample"].shape
280
-
281
- init_dict["class_embed_type"] = "simple_projection"
282
- init_dict["projection_class_embeddings_input_dim"] = sample_size
283
-
284
- inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device)
285
-
286
- model = self.model_class(**init_dict)
287
- model.to(torch_device)
288
- model.eval()
289
-
290
- with torch.no_grad():
291
- output = model(**inputs_dict)
292
-
293
- if isinstance(output, dict):
294
- output = output.sample
295
-
296
- self.assertIsNotNone(output)
297
- expected_shape = inputs_dict["sample"].shape
298
- self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
299
-
300
- def test_model_with_class_embeddings_concat(self):
301
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
302
-
303
- batch_size, _, _, sample_size = inputs_dict["sample"].shape
304
-
305
- init_dict["class_embed_type"] = "simple_projection"
306
- init_dict["projection_class_embeddings_input_dim"] = sample_size
307
- init_dict["class_embeddings_concat"] = True
308
-
309
- inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device)
310
-
311
- model = self.model_class(**init_dict)
312
- model.to(torch_device)
313
- model.eval()
314
-
315
- with torch.no_grad():
316
- output = model(**inputs_dict)
317
-
318
- if isinstance(output, dict):
319
- output = output.sample
320
-
321
- self.assertIsNotNone(output)
322
- expected_shape = inputs_dict["sample"].shape
323
- self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
324
-
325
- def test_model_attention_slicing(self):
326
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
327
-
328
- init_dict["attention_head_dim"] = (8, 16)
329
-
330
- model = self.model_class(**init_dict)
331
- model.to(torch_device)
332
- model.eval()
333
-
334
- model.set_attention_slice("auto")
335
- with torch.no_grad():
336
- output = model(**inputs_dict)
337
- assert output is not None
338
-
339
- model.set_attention_slice("max")
340
- with torch.no_grad():
341
- output = model(**inputs_dict)
342
- assert output is not None
343
-
344
- model.set_attention_slice(2)
345
- with torch.no_grad():
346
- output = model(**inputs_dict)
347
- assert output is not None
348
-
349
- def test_model_sliceable_head_dim(self):
350
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
351
-
352
- init_dict["attention_head_dim"] = (8, 16)
353
-
354
- model = self.model_class(**init_dict)
355
-
356
- def check_sliceable_dim_attr(module: torch.nn.Module):
357
- if hasattr(module, "set_attention_slice"):
358
- assert isinstance(module.sliceable_head_dim, int)
359
-
360
- for child in module.children():
361
- check_sliceable_dim_attr(child)
362
-
363
- # retrieve number of attention layers
364
- for module in model.children():
365
- check_sliceable_dim_attr(module)
366
-
367
- def test_special_attn_proc(self):
368
- class AttnEasyProc(torch.nn.Module):
369
- def __init__(self, num):
370
- super().__init__()
371
- self.weight = torch.nn.Parameter(torch.tensor(num))
372
- self.is_run = False
373
- self.number = 0
374
- self.counter = 0
375
-
376
- def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, number=None):
377
- batch_size, sequence_length, _ = hidden_states.shape
378
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
379
-
380
- query = attn.to_q(hidden_states)
381
-
382
- encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
383
- key = attn.to_k(encoder_hidden_states)
384
- value = attn.to_v(encoder_hidden_states)
385
-
386
- query = attn.head_to_batch_dim(query)
387
- key = attn.head_to_batch_dim(key)
388
- value = attn.head_to_batch_dim(value)
389
-
390
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
391
- hidden_states = torch.bmm(attention_probs, value)
392
- hidden_states = attn.batch_to_head_dim(hidden_states)
393
-
394
- # linear proj
395
- hidden_states = attn.to_out[0](hidden_states)
396
- # dropout
397
- hidden_states = attn.to_out[1](hidden_states)
398
-
399
- hidden_states += self.weight
400
-
401
- self.is_run = True
402
- self.counter += 1
403
- self.number = number
404
-
405
- return hidden_states
406
-
407
- # enable deterministic behavior for gradient checkpointing
408
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
409
-
410
- init_dict["attention_head_dim"] = (8, 16)
411
-
412
- model = self.model_class(**init_dict)
413
- model.to(torch_device)
414
-
415
- processor = AttnEasyProc(5.0)
416
-
417
- model.set_attn_processor(processor)
418
- model(**inputs_dict, cross_attention_kwargs={"number": 123}).sample
419
-
420
- assert processor.counter == 12
421
- assert processor.is_run
422
- assert processor.number == 123
423
-
424
- @parameterized.expand(
425
- [
426
- # fmt: off
427
- [torch.bool],
428
- [torch.long],
429
- [torch.float],
430
- # fmt: on
431
- ]
432
- )
433
- def test_model_xattn_mask(self, mask_dtype):
434
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
435
-
436
- model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16)})
437
- model.to(torch_device)
438
- model.eval()
439
-
440
- cond = inputs_dict["encoder_hidden_states"]
441
- with torch.no_grad():
442
- full_cond_out = model(**inputs_dict).sample
443
- assert full_cond_out is not None
444
-
445
- keepall_mask = torch.ones(*cond.shape[:-1], device=cond.device, dtype=mask_dtype)
446
- full_cond_keepallmask_out = model(**{**inputs_dict, "encoder_attention_mask": keepall_mask}).sample
447
- assert full_cond_keepallmask_out.allclose(
448
- full_cond_out
449
- ), "a 'keep all' mask should give the same result as no mask"
450
-
451
- trunc_cond = cond[:, :-1, :]
452
- trunc_cond_out = model(**{**inputs_dict, "encoder_hidden_states": trunc_cond}).sample
453
- assert not trunc_cond_out.allclose(
454
- full_cond_out
455
- ), "discarding the last token from our cond should change the result"
456
-
457
- batch, tokens, _ = cond.shape
458
- mask_last = (torch.arange(tokens) < tokens - 1).expand(batch, -1).to(cond.device, mask_dtype)
459
- masked_cond_out = model(**{**inputs_dict, "encoder_attention_mask": mask_last}).sample
460
- assert masked_cond_out.allclose(
461
- trunc_cond_out
462
- ), "masking the last token from our cond should be equivalent to truncating that token out of the condition"
463
-
464
- # see diffusers.models.attention_processor::Attention#prepare_attention_mask
465
- # note: we may not need to fix mask padding to work for stable-diffusion cross-attn masks.
466
- # since the use-case (somebody passes in a too-short cross-attn mask) is pretty esoteric.
467
- # maybe it's fine that this only works for the unclip use-case.
468
- @mark.skip(
469
- reason="we currently pad mask by target_length tokens (what unclip needs), whereas stable-diffusion's cross-attn needs to instead pad by remaining_length."
470
- )
471
- def test_model_xattn_padding(self):
472
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
473
-
474
- model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16)})
475
- model.to(torch_device)
476
- model.eval()
477
-
478
- cond = inputs_dict["encoder_hidden_states"]
479
- with torch.no_grad():
480
- full_cond_out = model(**inputs_dict).sample
481
- assert full_cond_out is not None
482
-
483
- batch, tokens, _ = cond.shape
484
- keeplast_mask = (torch.arange(tokens) == tokens - 1).expand(batch, -1).to(cond.device, torch.bool)
485
- keeplast_out = model(**{**inputs_dict, "encoder_attention_mask": keeplast_mask}).sample
486
- assert not keeplast_out.allclose(full_cond_out), "a 'keep last token' mask should change the result"
487
-
488
- trunc_mask = torch.zeros(batch, tokens - 1, device=cond.device, dtype=torch.bool)
489
- trunc_mask_out = model(**{**inputs_dict, "encoder_attention_mask": trunc_mask}).sample
490
- assert trunc_mask_out.allclose(
491
- keeplast_out
492
- ), "a mask with fewer tokens than condition, will be padded with 'keep' tokens. a 'discard-all' mask missing the final token is thus equivalent to a 'keep last' mask."
493
-
494
- def test_lora_processors(self):
495
- # enable deterministic behavior for gradient checkpointing
496
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
497
-
498
- init_dict["attention_head_dim"] = (8, 16)
499
-
500
- model = self.model_class(**init_dict)
501
- model.to(torch_device)
502
-
503
- with torch.no_grad():
504
- sample1 = model(**inputs_dict).sample
505
-
506
- lora_attn_procs = create_lora_layers(model)
507
-
508
- # make sure we can set a list of attention processors
509
- model.set_attn_processor(lora_attn_procs)
510
- model.to(torch_device)
511
-
512
- # test that attn processors can be set to itself
513
- model.set_attn_processor(model.attn_processors)
514
-
515
- with torch.no_grad():
516
- sample2 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample
517
- sample3 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
518
- sample4 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
519
-
520
- assert (sample1 - sample2).abs().max() < 3e-3
521
- assert (sample3 - sample4).abs().max() < 3e-3
522
-
523
- # sample 2 and sample 3 should be different
524
- assert (sample2 - sample3).abs().max() > 1e-4
525
-
526
- def test_lora_save_load(self):
527
- # enable deterministic behavior for gradient checkpointing
528
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
529
-
530
- init_dict["attention_head_dim"] = (8, 16)
531
-
532
- torch.manual_seed(0)
533
- model = self.model_class(**init_dict)
534
- model.to(torch_device)
535
-
536
- with torch.no_grad():
537
- old_sample = model(**inputs_dict).sample
538
-
539
- lora_attn_procs = create_lora_layers(model)
540
- model.set_attn_processor(lora_attn_procs)
541
-
542
- with torch.no_grad():
543
- sample = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
544
-
545
- with tempfile.TemporaryDirectory() as tmpdirname:
546
- model.save_attn_procs(tmpdirname)
547
- self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
548
- torch.manual_seed(0)
549
- new_model = self.model_class(**init_dict)
550
- new_model.to(torch_device)
551
- new_model.load_attn_procs(tmpdirname)
552
-
553
- with torch.no_grad():
554
- new_sample = new_model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
555
-
556
- assert (sample - new_sample).abs().max() < 1e-4
557
-
558
- # LoRA and no LoRA should NOT be the same
559
- assert (sample - old_sample).abs().max() > 1e-4
560
-
561
- def test_lora_save_load_safetensors(self):
562
- # enable deterministic behavior for gradient checkpointing
563
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
564
-
565
- init_dict["attention_head_dim"] = (8, 16)
566
-
567
- torch.manual_seed(0)
568
- model = self.model_class(**init_dict)
569
- model.to(torch_device)
570
-
571
- with torch.no_grad():
572
- old_sample = model(**inputs_dict).sample
573
-
574
- lora_attn_procs = create_lora_layers(model)
575
- model.set_attn_processor(lora_attn_procs)
576
-
577
- with torch.no_grad():
578
- sample = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
579
-
580
- with tempfile.TemporaryDirectory() as tmpdirname:
581
- model.save_attn_procs(tmpdirname, safe_serialization=True)
582
- self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
583
- torch.manual_seed(0)
584
- new_model = self.model_class(**init_dict)
585
- new_model.to(torch_device)
586
- new_model.load_attn_procs(tmpdirname)
587
-
588
- with torch.no_grad():
589
- new_sample = new_model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
590
-
591
- assert (sample - new_sample).abs().max() < 1e-4
592
-
593
- # LoRA and no LoRA should NOT be the same
594
- assert (sample - old_sample).abs().max() > 1e-4
595
-
596
- def test_lora_save_safetensors_load_torch(self):
597
- # enable deterministic behavior for gradient checkpointing
598
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
599
-
600
- init_dict["attention_head_dim"] = (8, 16)
601
-
602
- torch.manual_seed(0)
603
- model = self.model_class(**init_dict)
604
- model.to(torch_device)
605
-
606
- lora_attn_procs = create_lora_layers(model, mock_weights=False)
607
- model.set_attn_processor(lora_attn_procs)
608
- # Saving as torch, properly reloads directly with the filename
609
- with tempfile.TemporaryDirectory() as tmpdirname:
610
- model.save_attn_procs(tmpdirname)
611
- self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
612
- torch.manual_seed(0)
613
- new_model = self.model_class(**init_dict)
614
- new_model.to(torch_device)
615
- new_model.load_attn_procs(tmpdirname, weight_name="pytorch_lora_weights.bin")
616
-
617
- def test_lora_save_torch_force_load_safetensors_error(self):
618
- # enable deterministic behavior for gradient checkpointing
619
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
620
-
621
- init_dict["attention_head_dim"] = (8, 16)
622
-
623
- torch.manual_seed(0)
624
- model = self.model_class(**init_dict)
625
- model.to(torch_device)
626
-
627
- lora_attn_procs = create_lora_layers(model, mock_weights=False)
628
- model.set_attn_processor(lora_attn_procs)
629
- # Saving as torch, properly reloads directly with the filename
630
- with tempfile.TemporaryDirectory() as tmpdirname:
631
- model.save_attn_procs(tmpdirname)
632
- self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
633
- torch.manual_seed(0)
634
- new_model = self.model_class(**init_dict)
635
- new_model.to(torch_device)
636
- with self.assertRaises(IOError) as e:
637
- new_model.load_attn_procs(tmpdirname, use_safetensors=True)
638
- self.assertIn("Error no file named pytorch_lora_weights.safetensors", str(e.exception))
639
-
640
- def test_lora_on_off(self):
641
- # enable deterministic behavior for gradient checkpointing
642
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
643
-
644
- init_dict["attention_head_dim"] = (8, 16)
645
-
646
- torch.manual_seed(0)
647
- model = self.model_class(**init_dict)
648
- model.to(torch_device)
649
-
650
- with torch.no_grad():
651
- old_sample = model(**inputs_dict).sample
652
-
653
- lora_attn_procs = create_lora_layers(model)
654
- model.set_attn_processor(lora_attn_procs)
655
-
656
- with torch.no_grad():
657
- sample = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample
658
-
659
- model.set_default_attn_processor()
660
-
661
- with torch.no_grad():
662
- new_sample = model(**inputs_dict).sample
663
-
664
- assert (sample - new_sample).abs().max() < 1e-4
665
- assert (sample - old_sample).abs().max() < 3e-3
666
-
667
- @unittest.skipIf(
668
- torch_device != "cuda" or not is_xformers_available(),
669
- reason="XFormers attention is only available with CUDA and `xformers` installed",
670
- )
671
- def test_lora_xformers_on_off(self):
672
- # enable deterministic behavior for gradient checkpointing
673
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
674
-
675
- init_dict["attention_head_dim"] = (8, 16)
676
-
677
- torch.manual_seed(0)
678
- model = self.model_class(**init_dict)
679
- model.to(torch_device)
680
- lora_attn_procs = create_lora_layers(model)
681
- model.set_attn_processor(lora_attn_procs)
682
-
683
- # default
684
- with torch.no_grad():
685
- sample = model(**inputs_dict).sample
686
-
687
- model.enable_xformers_memory_efficient_attention()
688
- on_sample = model(**inputs_dict).sample
689
-
690
- model.disable_xformers_memory_efficient_attention()
691
- off_sample = model(**inputs_dict).sample
692
-
693
- assert (sample - on_sample).abs().max() < 1e-4
694
- assert (sample - off_sample).abs().max() < 1e-4
695
-
696
- def test_custom_diffusion_processors(self):
697
- # enable deterministic behavior for gradient checkpointing
698
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
699
-
700
- init_dict["attention_head_dim"] = (8, 16)
701
-
702
- model = self.model_class(**init_dict)
703
- model.to(torch_device)
704
-
705
- with torch.no_grad():
706
- sample1 = model(**inputs_dict).sample
707
-
708
- custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False)
709
-
710
- # make sure we can set a list of attention processors
711
- model.set_attn_processor(custom_diffusion_attn_procs)
712
- model.to(torch_device)
713
-
714
- # test that attn processors can be set to itself
715
- model.set_attn_processor(model.attn_processors)
716
-
717
- with torch.no_grad():
718
- sample2 = model(**inputs_dict).sample
719
-
720
- assert (sample1 - sample2).abs().max() < 3e-3
721
-
722
- def test_custom_diffusion_save_load(self):
723
- # enable deterministic behavior for gradient checkpointing
724
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
725
-
726
- init_dict["attention_head_dim"] = (8, 16)
727
-
728
- torch.manual_seed(0)
729
- model = self.model_class(**init_dict)
730
- model.to(torch_device)
731
-
732
- with torch.no_grad():
733
- old_sample = model(**inputs_dict).sample
734
-
735
- custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False)
736
- model.set_attn_processor(custom_diffusion_attn_procs)
737
-
738
- with torch.no_grad():
739
- sample = model(**inputs_dict).sample
740
-
741
- with tempfile.TemporaryDirectory() as tmpdirname:
742
- model.save_attn_procs(tmpdirname)
743
- self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_custom_diffusion_weights.bin")))
744
- torch.manual_seed(0)
745
- new_model = self.model_class(**init_dict)
746
- new_model.to(torch_device)
747
- new_model.load_attn_procs(tmpdirname, weight_name="pytorch_custom_diffusion_weights.bin")
748
-
749
- with torch.no_grad():
750
- new_sample = new_model(**inputs_dict).sample
751
-
752
- assert (sample - new_sample).abs().max() < 1e-4
753
-
754
- # custom diffusion and no custom diffusion should be the same
755
- assert (sample - old_sample).abs().max() < 3e-3
756
-
757
- @unittest.skipIf(
758
- torch_device != "cuda" or not is_xformers_available(),
759
- reason="XFormers attention is only available with CUDA and `xformers` installed",
760
- )
761
- def test_custom_diffusion_xformers_on_off(self):
762
- # enable deterministic behavior for gradient checkpointing
763
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
764
-
765
- init_dict["attention_head_dim"] = (8, 16)
766
-
767
- torch.manual_seed(0)
768
- model = self.model_class(**init_dict)
769
- model.to(torch_device)
770
- custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False)
771
- model.set_attn_processor(custom_diffusion_attn_procs)
772
-
773
- # default
774
- with torch.no_grad():
775
- sample = model(**inputs_dict).sample
776
-
777
- model.enable_xformers_memory_efficient_attention()
778
- on_sample = model(**inputs_dict).sample
779
-
780
- model.disable_xformers_memory_efficient_attention()
781
- off_sample = model(**inputs_dict).sample
782
-
783
- assert (sample - on_sample).abs().max() < 1e-4
784
- assert (sample - off_sample).abs().max() < 1e-4
785
-
786
- def test_pickle(self):
787
- # enable deterministic behavior for gradient checkpointing
788
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
789
-
790
- init_dict["attention_head_dim"] = (8, 16)
791
-
792
- model = self.model_class(**init_dict)
793
- model.to(torch_device)
794
-
795
- with torch.no_grad():
796
- sample = model(**inputs_dict).sample
797
-
798
- sample_copy = copy.copy(sample)
799
-
800
- assert (sample - sample_copy).abs().max() < 1e-4
801
-
802
-
803
- @slow
804
- class UNet2DConditionModelIntegrationTests(unittest.TestCase):
805
- def get_file_format(self, seed, shape):
806
- return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
807
-
808
- def tearDown(self):
809
- # clean up the VRAM after each test
810
- super().tearDown()
811
- gc.collect()
812
- torch.cuda.empty_cache()
813
-
814
- def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
815
- dtype = torch.float16 if fp16 else torch.float32
816
- image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
817
- return image
818
-
819
- def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
820
- revision = "fp16" if fp16 else None
821
- torch_dtype = torch.float16 if fp16 else torch.float32
822
-
823
- model = UNet2DConditionModel.from_pretrained(
824
- model_id, subfolder="unet", torch_dtype=torch_dtype, revision=revision
825
- )
826
- model.to(torch_device).eval()
827
-
828
- return model
829
-
830
- def test_set_attention_slice_auto(self):
831
- torch.cuda.empty_cache()
832
- torch.cuda.reset_max_memory_allocated()
833
- torch.cuda.reset_peak_memory_stats()
834
-
835
- unet = self.get_unet_model()
836
- unet.set_attention_slice("auto")
837
-
838
- latents = self.get_latents(33)
839
- encoder_hidden_states = self.get_encoder_hidden_states(33)
840
- timestep = 1
841
-
842
- with torch.no_grad():
843
- _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
844
-
845
- mem_bytes = torch.cuda.max_memory_allocated()
846
-
847
- assert mem_bytes < 5 * 10**9
848
-
849
- def test_set_attention_slice_max(self):
850
- torch.cuda.empty_cache()
851
- torch.cuda.reset_max_memory_allocated()
852
- torch.cuda.reset_peak_memory_stats()
853
-
854
- unet = self.get_unet_model()
855
- unet.set_attention_slice("max")
856
-
857
- latents = self.get_latents(33)
858
- encoder_hidden_states = self.get_encoder_hidden_states(33)
859
- timestep = 1
860
-
861
- with torch.no_grad():
862
- _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
863
-
864
- mem_bytes = torch.cuda.max_memory_allocated()
865
-
866
- assert mem_bytes < 5 * 10**9
867
-
868
- def test_set_attention_slice_int(self):
869
- torch.cuda.empty_cache()
870
- torch.cuda.reset_max_memory_allocated()
871
- torch.cuda.reset_peak_memory_stats()
872
-
873
- unet = self.get_unet_model()
874
- unet.set_attention_slice(2)
875
-
876
- latents = self.get_latents(33)
877
- encoder_hidden_states = self.get_encoder_hidden_states(33)
878
- timestep = 1
879
-
880
- with torch.no_grad():
881
- _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
882
-
883
- mem_bytes = torch.cuda.max_memory_allocated()
884
-
885
- assert mem_bytes < 5 * 10**9
886
-
887
- def test_set_attention_slice_list(self):
888
- torch.cuda.empty_cache()
889
- torch.cuda.reset_max_memory_allocated()
890
- torch.cuda.reset_peak_memory_stats()
891
-
892
- # there are 32 sliceable layers
893
- slice_list = 16 * [2, 3]
894
- unet = self.get_unet_model()
895
- unet.set_attention_slice(slice_list)
896
-
897
- latents = self.get_latents(33)
898
- encoder_hidden_states = self.get_encoder_hidden_states(33)
899
- timestep = 1
900
-
901
- with torch.no_grad():
902
- _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
903
-
904
- mem_bytes = torch.cuda.max_memory_allocated()
905
-
906
- assert mem_bytes < 5 * 10**9
907
-
908
- def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
909
- dtype = torch.float16 if fp16 else torch.float32
910
- hidden_states = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
911
- return hidden_states
912
-
913
- @parameterized.expand(
914
- [
915
- # fmt: off
916
- [33, 4, [-0.4424, 0.1510, -0.1937, 0.2118, 0.3746, -0.3957, 0.0160, -0.0435]],
917
- [47, 0.55, [-0.1508, 0.0379, -0.3075, 0.2540, 0.3633, -0.0821, 0.1719, -0.0207]],
918
- [21, 0.89, [-0.6479, 0.6364, -0.3464, 0.8697, 0.4443, -0.6289, -0.0091, 0.1778]],
919
- [9, 1000, [0.8888, -0.5659, 0.5834, -0.7469, 1.1912, -0.3923, 1.1241, -0.4424]],
920
- # fmt: on
921
- ]
922
- )
923
- @require_torch_gpu
924
- def test_compvis_sd_v1_4(self, seed, timestep, expected_slice):
925
- model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4")
926
- latents = self.get_latents(seed)
927
- encoder_hidden_states = self.get_encoder_hidden_states(seed)
928
-
929
- timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)
930
-
931
- with torch.no_grad():
932
- sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
933
-
934
- assert sample.shape == latents.shape
935
-
936
- output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
937
- expected_output_slice = torch.tensor(expected_slice)
938
-
939
- assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
940
-
941
- @parameterized.expand(
942
- [
943
- # fmt: off
944
- [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
945
- [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
946
- [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
947
- [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
948
- # fmt: on
949
- ]
950
- )
951
- @require_torch_gpu
952
- def test_compvis_sd_v1_4_fp16(self, seed, timestep, expected_slice):
953
- model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
954
- latents = self.get_latents(seed, fp16=True)
955
- encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
956
-
957
- timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)
958
-
959
- with torch.no_grad():
960
- sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
961
-
962
- assert sample.shape == latents.shape
963
-
964
- output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
965
- expected_output_slice = torch.tensor(expected_slice)
966
-
967
- assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
968
-
969
- @parameterized.expand(
970
- [
971
- # fmt: off
972
- [33, 4, [-0.4430, 0.1570, -0.1867, 0.2376, 0.3205, -0.3681, 0.0525, -0.0722]],
973
- [47, 0.55, [-0.1415, 0.0129, -0.3136, 0.2257, 0.3430, -0.0536, 0.2114, -0.0436]],
974
- [21, 0.89, [-0.7091, 0.6664, -0.3643, 0.9032, 0.4499, -0.6541, 0.0139, 0.1750]],
975
- [9, 1000, [0.8878, -0.5659, 0.5844, -0.7442, 1.1883, -0.3927, 1.1192, -0.4423]],
976
- # fmt: on
977
- ]
978
- )
979
- @require_torch_gpu
980
- def test_compvis_sd_v1_5(self, seed, timestep, expected_slice):
981
- model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5")
982
- latents = self.get_latents(seed)
983
- encoder_hidden_states = self.get_encoder_hidden_states(seed)
984
-
985
- timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)
986
-
987
- with torch.no_grad():
988
- sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
989
-
990
- assert sample.shape == latents.shape
991
-
992
- output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
993
- expected_output_slice = torch.tensor(expected_slice)
994
-
995
- assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
996
-
997
- @parameterized.expand(
998
- [
999
- # fmt: off
1000
- [83, 4, [-0.2695, -0.1669, 0.0073, -0.3181, -0.1187, -0.1676, -0.1395, -0.5972]],
1001
- [17, 0.55, [-0.1290, -0.2588, 0.0551, -0.0916, 0.3286, 0.0238, -0.3669, 0.0322]],
1002
- [8, 0.89, [-0.5283, 0.1198, 0.0870, -0.1141, 0.9189, -0.0150, 0.5474, 0.4319]],
1003
- [3, 1000, [-0.5601, 0.2411, -0.5435, 0.1268, 1.1338, -0.2427, -0.0280, -1.0020]],
1004
- # fmt: on
1005
- ]
1006
- )
1007
- @require_torch_gpu
1008
- def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice):
1009
- model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5", fp16=True)
1010
- latents = self.get_latents(seed, fp16=True)
1011
- encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
1012
-
1013
- timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)
1014
-
1015
- with torch.no_grad():
1016
- sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
1017
-
1018
- assert sample.shape == latents.shape
1019
-
1020
- output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
1021
- expected_output_slice = torch.tensor(expected_slice)
1022
-
1023
- assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
1024
-
1025
- @parameterized.expand(
1026
- [
1027
- # fmt: off
1028
- [33, 4, [-0.7639, 0.0106, -0.1615, -0.3487, -0.0423, -0.7972, 0.0085, -0.4858]],
1029
- [47, 0.55, [-0.6564, 0.0795, -1.9026, -0.6258, 1.8235, 1.2056, 1.2169, 0.9073]],
1030
- [21, 0.89, [0.0327, 0.4399, -0.6358, 0.3417, 0.4120, -0.5621, -0.0397, -1.0430]],
1031
- [9, 1000, [0.1600, 0.7303, -1.0556, -0.3515, -0.7440, -1.2037, -1.8149, -1.8931]],
1032
- # fmt: on
1033
- ]
1034
- )
1035
- @require_torch_gpu
1036
- def test_compvis_sd_inpaint(self, seed, timestep, expected_slice):
1037
- model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting")
1038
- latents = self.get_latents(seed, shape=(4, 9, 64, 64))
1039
- encoder_hidden_states = self.get_encoder_hidden_states(seed)
1040
-
1041
- timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)
1042
-
1043
- with torch.no_grad():
1044
- sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
1045
-
1046
- assert sample.shape == (4, 4, 64, 64)
1047
-
1048
- output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
1049
- expected_output_slice = torch.tensor(expected_slice)
1050
-
1051
- assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
1052
-
1053
- @parameterized.expand(
1054
- [
1055
- # fmt: off
1056
- [83, 4, [-0.1047, -1.7227, 0.1067, 0.0164, -0.5698, -0.4172, -0.1388, 1.1387]],
1057
- [17, 0.55, [0.0975, -0.2856, -0.3508, -0.4600, 0.3376, 0.2930, -0.2747, -0.7026]],
1058
- [8, 0.89, [-0.0952, 0.0183, -0.5825, -0.1981, 0.1131, 0.4668, -0.0395, -0.3486]],
1059
- [3, 1000, [0.4790, 0.4949, -1.0732, -0.7158, 0.7959, -0.9478, 0.1105, -0.9741]],
1060
- # fmt: on
1061
- ]
1062
- )
1063
- @require_torch_gpu
1064
- def test_compvis_sd_inpaint_fp16(self, seed, timestep, expected_slice):
1065
- model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting", fp16=True)
1066
- latents = self.get_latents(seed, shape=(4, 9, 64, 64), fp16=True)
1067
- encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
1068
-
1069
- timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)
1070
-
1071
- with torch.no_grad():
1072
- sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
1073
-
1074
- assert sample.shape == (4, 4, 64, 64)
1075
-
1076
- output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
1077
- expected_output_slice = torch.tensor(expected_slice)
1078
-
1079
- assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
1080
-
1081
- @parameterized.expand(
1082
- [
1083
- # fmt: off
1084
- [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
1085
- [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
1086
- [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
1087
- [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
1088
- # fmt: on
1089
- ]
1090
- )
1091
- @require_torch_gpu
1092
- def test_stabilityai_sd_v2_fp16(self, seed, timestep, expected_slice):
1093
- model = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
1094
- latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
1095
- encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
1096
-
1097
- timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)
1098
-
1099
- with torch.no_grad():
1100
- sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample
1101
-
1102
- assert sample.shape == latents.shape
1103
-
1104
- output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
1105
- expected_output_slice = torch.tensor(expected_slice)
1106
-
1107
- assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky_prior.py DELETED
@@ -1,246 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import unittest
17
-
18
- import numpy as np
19
- import torch
20
- from torch import nn
21
- from transformers import (
22
- CLIPImageProcessor,
23
- CLIPTextConfig,
24
- CLIPTextModelWithProjection,
25
- CLIPTokenizer,
26
- CLIPVisionConfig,
27
- CLIPVisionModelWithProjection,
28
- )
29
-
30
- from diffusers import KandinskyPriorPipeline, PriorTransformer, UnCLIPScheduler
31
- from diffusers.utils import torch_device
32
- from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
33
-
34
- from ..test_pipelines_common import PipelineTesterMixin
35
-
36
-
37
- enable_full_determinism()
38
-
39
-
40
- class Dummies:
41
- @property
42
- def text_embedder_hidden_size(self):
43
- return 32
44
-
45
- @property
46
- def time_input_dim(self):
47
- return 32
48
-
49
- @property
50
- def block_out_channels_0(self):
51
- return self.time_input_dim
52
-
53
- @property
54
- def time_embed_dim(self):
55
- return self.time_input_dim * 4
56
-
57
- @property
58
- def cross_attention_dim(self):
59
- return 100
60
-
61
- @property
62
- def dummy_tokenizer(self):
63
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
64
- return tokenizer
65
-
66
- @property
67
- def dummy_text_encoder(self):
68
- torch.manual_seed(0)
69
- config = CLIPTextConfig(
70
- bos_token_id=0,
71
- eos_token_id=2,
72
- hidden_size=self.text_embedder_hidden_size,
73
- projection_dim=self.text_embedder_hidden_size,
74
- intermediate_size=37,
75
- layer_norm_eps=1e-05,
76
- num_attention_heads=4,
77
- num_hidden_layers=5,
78
- pad_token_id=1,
79
- vocab_size=1000,
80
- )
81
- return CLIPTextModelWithProjection(config)
82
-
83
- @property
84
- def dummy_prior(self):
85
- torch.manual_seed(0)
86
-
87
- model_kwargs = {
88
- "num_attention_heads": 2,
89
- "attention_head_dim": 12,
90
- "embedding_dim": self.text_embedder_hidden_size,
91
- "num_layers": 1,
92
- }
93
-
94
- model = PriorTransformer(**model_kwargs)
95
- # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it won't
96
- model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
97
- return model
98
-
99
- @property
100
- def dummy_image_encoder(self):
101
- torch.manual_seed(0)
102
- config = CLIPVisionConfig(
103
- hidden_size=self.text_embedder_hidden_size,
104
- image_size=224,
105
- projection_dim=self.text_embedder_hidden_size,
106
- intermediate_size=37,
107
- num_attention_heads=4,
108
- num_channels=3,
109
- num_hidden_layers=5,
110
- patch_size=14,
111
- )
112
-
113
- model = CLIPVisionModelWithProjection(config)
114
- return model
115
-
116
- @property
117
- def dummy_image_processor(self):
118
- image_processor = CLIPImageProcessor(
119
- crop_size=224,
120
- do_center_crop=True,
121
- do_normalize=True,
122
- do_resize=True,
123
- image_mean=[0.48145466, 0.4578275, 0.40821073],
124
- image_std=[0.26862954, 0.26130258, 0.27577711],
125
- resample=3,
126
- size=224,
127
- )
128
-
129
- return image_processor
130
-
131
- def get_dummy_components(self):
132
- prior = self.dummy_prior
133
- image_encoder = self.dummy_image_encoder
134
- text_encoder = self.dummy_text_encoder
135
- tokenizer = self.dummy_tokenizer
136
- image_processor = self.dummy_image_processor
137
-
138
- scheduler = UnCLIPScheduler(
139
- variance_type="fixed_small_log",
140
- prediction_type="sample",
141
- num_train_timesteps=1000,
142
- clip_sample=True,
143
- clip_sample_range=10.0,
144
- )
145
-
146
- components = {
147
- "prior": prior,
148
- "image_encoder": image_encoder,
149
- "text_encoder": text_encoder,
150
- "tokenizer": tokenizer,
151
- "scheduler": scheduler,
152
- "image_processor": image_processor,
153
- }
154
-
155
- return components
156
-
157
- def get_dummy_inputs(self, device, seed=0):
158
- if str(device).startswith("mps"):
159
- generator = torch.manual_seed(seed)
160
- else:
161
- generator = torch.Generator(device=device).manual_seed(seed)
162
- inputs = {
163
- "prompt": "horse",
164
- "generator": generator,
165
- "guidance_scale": 4.0,
166
- "num_inference_steps": 2,
167
- "output_type": "np",
168
- }
169
- return inputs
170
-
171
-
172
- class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
173
- pipeline_class = KandinskyPriorPipeline
174
- params = ["prompt"]
175
- batch_params = ["prompt", "negative_prompt"]
176
- required_optional_params = [
177
- "num_images_per_prompt",
178
- "generator",
179
- "num_inference_steps",
180
- "latents",
181
- "negative_prompt",
182
- "guidance_scale",
183
- "output_type",
184
- "return_dict",
185
- ]
186
- test_xformers_attention = False
187
-
188
- def get_dummy_components(self):
189
- dummy = Dummies()
190
- return dummy.get_dummy_components()
191
-
192
- def get_dummy_inputs(self, device, seed=0):
193
- dummy = Dummies()
194
- return dummy.get_dummy_inputs(device=device, seed=seed)
195
-
196
- def test_kandinsky_prior(self):
197
- device = "cpu"
198
-
199
- components = self.get_dummy_components()
200
-
201
- pipe = self.pipeline_class(**components)
202
- pipe = pipe.to(device)
203
-
204
- pipe.set_progress_bar_config(disable=None)
205
-
206
- output = pipe(**self.get_dummy_inputs(device))
207
- image = output.image_embeds
208
-
209
- image_from_tuple = pipe(
210
- **self.get_dummy_inputs(device),
211
- return_dict=False,
212
- )[0]
213
-
214
- image_slice = image[0, -10:]
215
- image_from_tuple_slice = image_from_tuple[0, -10:]
216
-
217
- assert image.shape == (1, 32)
218
-
219
- expected_slice = np.array(
220
- [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
221
- )
222
-
223
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
224
- assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
225
-
226
- @skip_mps
227
- def test_inference_batch_single_identical(self):
228
- test_max_difference = torch_device == "cpu"
229
- relax_max_difference = True
230
- test_mean_pixel_difference = False
231
-
232
- self._test_inference_batch_single_identical(
233
- test_max_difference=test_max_difference,
234
- relax_max_difference=relax_max_difference,
235
- test_mean_pixel_difference=test_mean_pixel_difference,
236
- )
237
-
238
- @skip_mps
239
- def test_attention_slicing_forward_pass(self):
240
- test_max_difference = torch_device == "cpu"
241
- test_mean_pixel_difference = False
242
-
243
- self._test_attention_slicing_forward_pass(
244
- test_max_difference=test_max_difference,
245
- test_mean_pixel_difference=test_mean_pixel_difference,
246
- )
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py DELETED
@@ -1,285 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import random
18
- import unittest
19
-
20
- import numpy as np
21
- import torch
22
- from PIL import Image
23
-
24
- from diffusers import (
25
- DDIMScheduler,
26
- KandinskyV22Img2ImgPipeline,
27
- KandinskyV22PriorPipeline,
28
- UNet2DConditionModel,
29
- VQModel,
30
- )
31
- from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
32
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
33
-
34
- from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
35
-
36
-
37
- enable_full_determinism()
38
-
39
-
40
- class Dummies:
41
- @property
42
- def text_embedder_hidden_size(self):
43
- return 32
44
-
45
- @property
46
- def time_input_dim(self):
47
- return 32
48
-
49
- @property
50
- def block_out_channels_0(self):
51
- return self.time_input_dim
52
-
53
- @property
54
- def time_embed_dim(self):
55
- return self.time_input_dim * 4
56
-
57
- @property
58
- def cross_attention_dim(self):
59
- return 32
60
-
61
- @property
62
- def dummy_unet(self):
63
- torch.manual_seed(0)
64
-
65
- model_kwargs = {
66
- "in_channels": 4,
67
- # Out channels is double the in channels because the model predicts both mean and variance
68
- "out_channels": 8,
69
- "addition_embed_type": "image",
70
- "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
71
- "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
72
- "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
73
- "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
74
- "layers_per_block": 1,
75
- "encoder_hid_dim": self.text_embedder_hidden_size,
76
- "encoder_hid_dim_type": "image_proj",
77
- "cross_attention_dim": self.cross_attention_dim,
78
- "attention_head_dim": 4,
79
- "resnet_time_scale_shift": "scale_shift",
80
- "class_embed_type": None,
81
- }
82
-
83
- model = UNet2DConditionModel(**model_kwargs)
84
- return model
85
-
86
- @property
87
- def dummy_movq_kwargs(self):
88
- return {
89
- "block_out_channels": [32, 64],
90
- "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
91
- "in_channels": 3,
92
- "latent_channels": 4,
93
- "layers_per_block": 1,
94
- "norm_num_groups": 8,
95
- "norm_type": "spatial",
96
- "num_vq_embeddings": 12,
97
- "out_channels": 3,
98
- "up_block_types": [
99
- "AttnUpDecoderBlock2D",
100
- "UpDecoderBlock2D",
101
- ],
102
- "vq_embed_dim": 4,
103
- }
104
-
105
- @property
106
- def dummy_movq(self):
107
- torch.manual_seed(0)
108
- model = VQModel(**self.dummy_movq_kwargs)
109
- return model
110
-
111
- def get_dummy_components(self):
112
- unet = self.dummy_unet
113
- movq = self.dummy_movq
114
-
115
- ddim_config = {
116
- "num_train_timesteps": 1000,
117
- "beta_schedule": "linear",
118
- "beta_start": 0.00085,
119
- "beta_end": 0.012,
120
- "clip_sample": False,
121
- "set_alpha_to_one": False,
122
- "steps_offset": 0,
123
- "prediction_type": "epsilon",
124
- "thresholding": False,
125
- }
126
-
127
- scheduler = DDIMScheduler(**ddim_config)
128
-
129
- components = {
130
- "unet": unet,
131
- "scheduler": scheduler,
132
- "movq": movq,
133
- }
134
-
135
- return components
136
-
137
- def get_dummy_inputs(self, device, seed=0):
138
- image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
139
- negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
140
- device
141
- )
142
- # create init_image
143
- image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
144
- image = image.cpu().permute(0, 2, 3, 1)[0]
145
- init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
146
-
147
- if str(device).startswith("mps"):
148
- generator = torch.manual_seed(seed)
149
- else:
150
- generator = torch.Generator(device=device).manual_seed(seed)
151
- inputs = {
152
- "image": init_image,
153
- "image_embeds": image_embeds,
154
- "negative_image_embeds": negative_image_embeds,
155
- "generator": generator,
156
- "height": 64,
157
- "width": 64,
158
- "num_inference_steps": 10,
159
- "guidance_scale": 7.0,
160
- "strength": 0.2,
161
- "output_type": "np",
162
- }
163
- return inputs
164
-
165
-
166
- class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
167
- pipeline_class = KandinskyV22Img2ImgPipeline
168
- params = ["image_embeds", "negative_image_embeds", "image"]
169
- batch_params = [
170
- "image_embeds",
171
- "negative_image_embeds",
172
- "image",
173
- ]
174
- required_optional_params = [
175
- "generator",
176
- "height",
177
- "width",
178
- "strength",
179
- "guidance_scale",
180
- "num_inference_steps",
181
- "return_dict",
182
- "guidance_scale",
183
- "num_images_per_prompt",
184
- "output_type",
185
- "return_dict",
186
- ]
187
- test_xformers_attention = False
188
-
189
- def get_dummy_components(self):
190
- dummies = Dummies()
191
- return dummies.get_dummy_components()
192
-
193
- def get_dummy_inputs(self, device, seed=0):
194
- dummies = Dummies()
195
- return dummies.get_dummy_inputs(device=device, seed=seed)
196
-
197
- def test_kandinsky_img2img(self):
198
- device = "cpu"
199
-
200
- components = self.get_dummy_components()
201
-
202
- pipe = self.pipeline_class(**components)
203
- pipe = pipe.to(device)
204
-
205
- pipe.set_progress_bar_config(disable=None)
206
-
207
- output = pipe(**self.get_dummy_inputs(device))
208
- image = output.images
209
-
210
- image_from_tuple = pipe(
211
- **self.get_dummy_inputs(device),
212
- return_dict=False,
213
- )[0]
214
-
215
- image_slice = image[0, -3:, -3:, -1]
216
- image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
217
-
218
- assert image.shape == (1, 64, 64, 3)
219
-
220
- expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016])
221
- assert (
222
- np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
223
- ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
224
- assert (
225
- np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
226
- ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
227
-
228
-
229
- @slow
230
- @require_torch_gpu
231
- class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
232
- def tearDown(self):
233
- # clean up the VRAM after each test
234
- super().tearDown()
235
- gc.collect()
236
- torch.cuda.empty_cache()
237
-
238
- def test_kandinsky_img2img(self):
239
- expected_image = load_numpy(
240
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
241
- "/kandinskyv22/kandinskyv22_img2img_frog.npy"
242
- )
243
-
244
- init_image = load_image(
245
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
246
- )
247
- prompt = "A red cartoon frog, 4k"
248
-
249
- pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
250
- "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
251
- )
252
- pipe_prior.to(torch_device)
253
-
254
- pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
255
- "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
256
- )
257
- pipeline = pipeline.to(torch_device)
258
-
259
- pipeline.set_progress_bar_config(disable=None)
260
-
261
- generator = torch.Generator(device="cpu").manual_seed(0)
262
- image_emb, zero_image_emb = pipe_prior(
263
- prompt,
264
- generator=generator,
265
- num_inference_steps=5,
266
- negative_prompt="",
267
- ).to_tuple()
268
-
269
- output = pipeline(
270
- image=init_image,
271
- image_embeds=image_emb,
272
- negative_image_embeds=zero_image_emb,
273
- generator=generator,
274
- num_inference_steps=100,
275
- height=768,
276
- width=768,
277
- strength=0.2,
278
- output_type="np",
279
- )
280
-
281
- image = output.images[0]
282
-
283
- assert image.shape == (768, 768, 3)
284
-
285
- assert_mean_pixel_difference(image, expected_image)
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py DELETED
@@ -1,256 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import unittest
18
-
19
- import numpy as np
20
- import torch
21
- from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
22
-
23
- from diffusers import (
24
- AutoencoderKL,
25
- DDIMScheduler,
26
- EulerAncestralDiscreteScheduler,
27
- PNDMScheduler,
28
- StableDiffusionModelEditingPipeline,
29
- UNet2DConditionModel,
30
- )
31
- from diffusers.utils import slow, torch_device
32
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
33
-
34
- from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
35
- from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
36
-
37
-
38
- enable_full_determinism()
39
-
40
-
41
- @skip_mps
42
- class StableDiffusionModelEditingPipelineFastTests(
43
- PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
44
- ):
45
- pipeline_class = StableDiffusionModelEditingPipeline
46
- params = TEXT_TO_IMAGE_PARAMS
47
- batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
48
- image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
49
- image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
50
-
51
- def get_dummy_components(self):
52
- torch.manual_seed(0)
53
- unet = UNet2DConditionModel(
54
- block_out_channels=(32, 64),
55
- layers_per_block=2,
56
- sample_size=32,
57
- in_channels=4,
58
- out_channels=4,
59
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
60
- up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
61
- cross_attention_dim=32,
62
- )
63
- scheduler = DDIMScheduler()
64
- torch.manual_seed(0)
65
- vae = AutoencoderKL(
66
- block_out_channels=[32, 64],
67
- in_channels=3,
68
- out_channels=3,
69
- down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
70
- up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
71
- latent_channels=4,
72
- )
73
- torch.manual_seed(0)
74
- text_encoder_config = CLIPTextConfig(
75
- bos_token_id=0,
76
- eos_token_id=2,
77
- hidden_size=32,
78
- intermediate_size=37,
79
- layer_norm_eps=1e-05,
80
- num_attention_heads=4,
81
- num_hidden_layers=5,
82
- pad_token_id=1,
83
- vocab_size=1000,
84
- )
85
- text_encoder = CLIPTextModel(text_encoder_config)
86
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
87
-
88
- components = {
89
- "unet": unet,
90
- "scheduler": scheduler,
91
- "vae": vae,
92
- "text_encoder": text_encoder,
93
- "tokenizer": tokenizer,
94
- "safety_checker": None,
95
- "feature_extractor": None,
96
- }
97
- return components
98
-
99
- def get_dummy_inputs(self, device, seed=0):
100
- generator = torch.manual_seed(seed)
101
- inputs = {
102
- "prompt": "A field of roses",
103
- "generator": generator,
104
- # Setting height and width to None to prevent OOMs on CPU.
105
- "height": None,
106
- "width": None,
107
- "num_inference_steps": 2,
108
- "guidance_scale": 6.0,
109
- "output_type": "numpy",
110
- }
111
- return inputs
112
-
113
- def test_stable_diffusion_model_editing_default_case(self):
114
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
115
- components = self.get_dummy_components()
116
- sd_pipe = StableDiffusionModelEditingPipeline(**components)
117
- sd_pipe = sd_pipe.to(device)
118
- sd_pipe.set_progress_bar_config(disable=None)
119
-
120
- inputs = self.get_dummy_inputs(device)
121
- image = sd_pipe(**inputs).images
122
- image_slice = image[0, -3:, -3:, -1]
123
- assert image.shape == (1, 64, 64, 3)
124
-
125
- expected_slice = np.array([0.4755, 0.5132, 0.4976, 0.3904, 0.3554, 0.4765, 0.5139, 0.5158, 0.4889])
126
-
127
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
128
-
129
- def test_stable_diffusion_model_editing_negative_prompt(self):
130
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
131
- components = self.get_dummy_components()
132
- sd_pipe = StableDiffusionModelEditingPipeline(**components)
133
- sd_pipe = sd_pipe.to(device)
134
- sd_pipe.set_progress_bar_config(disable=None)
135
-
136
- inputs = self.get_dummy_inputs(device)
137
- negative_prompt = "french fries"
138
- output = sd_pipe(**inputs, negative_prompt=negative_prompt)
139
- image = output.images
140
- image_slice = image[0, -3:, -3:, -1]
141
-
142
- assert image.shape == (1, 64, 64, 3)
143
-
144
- expected_slice = np.array([0.4992, 0.5101, 0.5004, 0.3949, 0.3604, 0.4735, 0.5216, 0.5204, 0.4913])
145
-
146
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
147
-
148
- def test_stable_diffusion_model_editing_euler(self):
149
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
150
- components = self.get_dummy_components()
151
- components["scheduler"] = EulerAncestralDiscreteScheduler(
152
- beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
153
- )
154
- sd_pipe = StableDiffusionModelEditingPipeline(**components)
155
- sd_pipe = sd_pipe.to(device)
156
- sd_pipe.set_progress_bar_config(disable=None)
157
-
158
- inputs = self.get_dummy_inputs(device)
159
- image = sd_pipe(**inputs).images
160
- image_slice = image[0, -3:, -3:, -1]
161
-
162
- assert image.shape == (1, 64, 64, 3)
163
-
164
- expected_slice = np.array([0.4747, 0.5372, 0.4779, 0.4982, 0.5543, 0.4816, 0.5238, 0.4904, 0.5027])
165
-
166
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
167
-
168
- def test_stable_diffusion_model_editing_pndm(self):
169
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
170
- components = self.get_dummy_components()
171
- components["scheduler"] = PNDMScheduler()
172
- sd_pipe = StableDiffusionModelEditingPipeline(**components)
173
- sd_pipe = sd_pipe.to(device)
174
- sd_pipe.set_progress_bar_config(disable=None)
175
-
176
- inputs = self.get_dummy_inputs(device)
177
- # the pipeline does not expect pndm so test if it raises error.
178
- with self.assertRaises(ValueError):
179
- _ = sd_pipe(**inputs).images
180
-
181
- def test_inference_batch_single_identical(self):
182
- super().test_inference_batch_single_identical(expected_max_diff=5e-3)
183
-
184
- def test_attention_slicing_forward_pass(self):
185
- super().test_attention_slicing_forward_pass(expected_max_diff=5e-3)
186
-
187
-
188
- @slow
189
- @require_torch_gpu
190
- class StableDiffusionModelEditingSlowTests(unittest.TestCase):
191
- def tearDown(self):
192
- super().tearDown()
193
- gc.collect()
194
- torch.cuda.empty_cache()
195
-
196
- def get_inputs(self, seed=0):
197
- generator = torch.manual_seed(seed)
198
- inputs = {
199
- "prompt": "A field of roses",
200
- "generator": generator,
201
- "num_inference_steps": 3,
202
- "guidance_scale": 7.5,
203
- "output_type": "numpy",
204
- }
205
- return inputs
206
-
207
- def test_stable_diffusion_model_editing_default(self):
208
- model_ckpt = "CompVis/stable-diffusion-v1-4"
209
- pipe = StableDiffusionModelEditingPipeline.from_pretrained(model_ckpt, safety_checker=None)
210
- pipe.to(torch_device)
211
- pipe.set_progress_bar_config(disable=None)
212
- pipe.enable_attention_slicing()
213
-
214
- inputs = self.get_inputs()
215
- image = pipe(**inputs).images
216
- image_slice = image[0, -3:, -3:, -1].flatten()
217
-
218
- assert image.shape == (1, 512, 512, 3)
219
-
220
- expected_slice = np.array(
221
- [0.6749496, 0.6386453, 0.51443267, 0.66094905, 0.61921215, 0.5491332, 0.5744417, 0.58075106, 0.5174658]
222
- )
223
-
224
- assert np.abs(expected_slice - image_slice).max() < 1e-2
225
-
226
- # make sure image changes after editing
227
- pipe.edit_model("A pack of roses", "A pack of blue roses")
228
-
229
- image = pipe(**inputs).images
230
- image_slice = image[0, -3:, -3:, -1].flatten()
231
-
232
- assert image.shape == (1, 512, 512, 3)
233
-
234
- assert np.abs(expected_slice - image_slice).max() > 1e-1
235
-
236
- def test_stable_diffusion_model_editing_pipeline_with_sequential_cpu_offloading(self):
237
- torch.cuda.empty_cache()
238
- torch.cuda.reset_max_memory_allocated()
239
- torch.cuda.reset_peak_memory_stats()
240
-
241
- model_ckpt = "CompVis/stable-diffusion-v1-4"
242
- scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
243
- pipe = StableDiffusionModelEditingPipeline.from_pretrained(
244
- model_ckpt, scheduler=scheduler, safety_checker=None
245
- )
246
- pipe = pipe.to(torch_device)
247
- pipe.set_progress_bar_config(disable=None)
248
- pipe.enable_attention_slicing(1)
249
- pipe.enable_sequential_cpu_offload()
250
-
251
- inputs = self.get_inputs()
252
- _ = pipe(**inputs)
253
-
254
- mem_bytes = torch.cuda.max_memory_allocated()
255
- # make sure that less than 4.4 GB is allocated
256
- assert mem_bytes < 4.4 * 10**9
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py DELETED
@@ -1,409 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import unittest
18
-
19
- import numpy as np
20
- import torch
21
- from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
22
-
23
- from diffusers import (
24
- AutoencoderKL,
25
- DDIMScheduler,
26
- EulerAncestralDiscreteScheduler,
27
- LMSDiscreteScheduler,
28
- PNDMScheduler,
29
- StableDiffusionPanoramaPipeline,
30
- UNet2DConditionModel,
31
- )
32
- from diffusers.utils import slow, torch_device
33
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
34
-
35
- from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
36
- from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
37
-
38
-
39
- enable_full_determinism()
40
-
41
-
42
- @skip_mps
43
- class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
44
- pipeline_class = StableDiffusionPanoramaPipeline
45
- params = TEXT_TO_IMAGE_PARAMS
46
- batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
47
- image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
48
- image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
49
-
50
- def get_dummy_components(self):
51
- torch.manual_seed(0)
52
- unet = UNet2DConditionModel(
53
- block_out_channels=(32, 64),
54
- layers_per_block=1,
55
- sample_size=32,
56
- in_channels=4,
57
- out_channels=4,
58
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
59
- up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
60
- cross_attention_dim=32,
61
- )
62
- scheduler = DDIMScheduler()
63
- torch.manual_seed(0)
64
- vae = AutoencoderKL(
65
- block_out_channels=[32, 64],
66
- in_channels=3,
67
- out_channels=3,
68
- down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
69
- up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
70
- latent_channels=4,
71
- )
72
- torch.manual_seed(0)
73
- text_encoder_config = CLIPTextConfig(
74
- bos_token_id=0,
75
- eos_token_id=2,
76
- hidden_size=32,
77
- intermediate_size=37,
78
- layer_norm_eps=1e-05,
79
- num_attention_heads=4,
80
- num_hidden_layers=5,
81
- pad_token_id=1,
82
- vocab_size=1000,
83
- )
84
- text_encoder = CLIPTextModel(text_encoder_config)
85
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
86
-
87
- components = {
88
- "unet": unet,
89
- "scheduler": scheduler,
90
- "vae": vae,
91
- "text_encoder": text_encoder,
92
- "tokenizer": tokenizer,
93
- "safety_checker": None,
94
- "feature_extractor": None,
95
- }
96
- return components
97
-
98
- def get_dummy_inputs(self, device, seed=0):
99
- generator = torch.manual_seed(seed)
100
- inputs = {
101
- "prompt": "a photo of the dolomites",
102
- "generator": generator,
103
- # Setting height and width to None to prevent OOMs on CPU.
104
- "height": None,
105
- "width": None,
106
- "num_inference_steps": 1,
107
- "guidance_scale": 6.0,
108
- "output_type": "numpy",
109
- }
110
- return inputs
111
-
112
- def test_stable_diffusion_panorama_default_case(self):
113
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
114
- components = self.get_dummy_components()
115
- sd_pipe = StableDiffusionPanoramaPipeline(**components)
116
- sd_pipe = sd_pipe.to(device)
117
- sd_pipe.set_progress_bar_config(disable=None)
118
-
119
- inputs = self.get_dummy_inputs(device)
120
- image = sd_pipe(**inputs).images
121
- image_slice = image[0, -3:, -3:, -1]
122
- assert image.shape == (1, 64, 64, 3)
123
-
124
- expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
125
-
126
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
127
-
128
- def test_stable_diffusion_panorama_circular_padding_case(self):
129
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
130
- components = self.get_dummy_components()
131
- sd_pipe = StableDiffusionPanoramaPipeline(**components)
132
- sd_pipe = sd_pipe.to(device)
133
- sd_pipe.set_progress_bar_config(disable=None)
134
-
135
- inputs = self.get_dummy_inputs(device)
136
- image = sd_pipe(**inputs, circular_padding=True).images
137
- image_slice = image[0, -3:, -3:, -1]
138
- assert image.shape == (1, 64, 64, 3)
139
-
140
- expected_slice = np.array([0.6127, 0.6299, 0.4595, 0.4051, 0.4543, 0.3925, 0.5510, 0.5693, 0.5031])
141
-
142
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
143
-
144
- # override to speed the overall test timing up.
145
- def test_inference_batch_consistent(self):
146
- super().test_inference_batch_consistent(batch_sizes=[1, 2])
147
-
148
- # override to speed the overall test timing up.
149
- def test_inference_batch_single_identical(self):
150
- super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
151
-
152
- def test_stable_diffusion_panorama_negative_prompt(self):
153
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
154
- components = self.get_dummy_components()
155
- sd_pipe = StableDiffusionPanoramaPipeline(**components)
156
- sd_pipe = sd_pipe.to(device)
157
- sd_pipe.set_progress_bar_config(disable=None)
158
-
159
- inputs = self.get_dummy_inputs(device)
160
- negative_prompt = "french fries"
161
- output = sd_pipe(**inputs, negative_prompt=negative_prompt)
162
- image = output.images
163
- image_slice = image[0, -3:, -3:, -1]
164
-
165
- assert image.shape == (1, 64, 64, 3)
166
-
167
- expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
168
-
169
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
170
-
171
- def test_stable_diffusion_panorama_views_batch(self):
172
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
173
- components = self.get_dummy_components()
174
- sd_pipe = StableDiffusionPanoramaPipeline(**components)
175
- sd_pipe = sd_pipe.to(device)
176
- sd_pipe.set_progress_bar_config(disable=None)
177
-
178
- inputs = self.get_dummy_inputs(device)
179
- output = sd_pipe(**inputs, view_batch_size=2)
180
- image = output.images
181
- image_slice = image[0, -3:, -3:, -1]
182
-
183
- assert image.shape == (1, 64, 64, 3)
184
-
185
- expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
186
-
187
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
188
-
189
- def test_stable_diffusion_panorama_views_batch_circular_padding(self):
190
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
191
- components = self.get_dummy_components()
192
- sd_pipe = StableDiffusionPanoramaPipeline(**components)
193
- sd_pipe = sd_pipe.to(device)
194
- sd_pipe.set_progress_bar_config(disable=None)
195
-
196
- inputs = self.get_dummy_inputs(device)
197
- output = sd_pipe(**inputs, circular_padding=True, view_batch_size=2)
198
- image = output.images
199
- image_slice = image[0, -3:, -3:, -1]
200
-
201
- assert image.shape == (1, 64, 64, 3)
202
-
203
- expected_slice = np.array([0.6127, 0.6299, 0.4595, 0.4051, 0.4543, 0.3925, 0.5510, 0.5693, 0.5031])
204
-
205
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
206
-
207
- def test_stable_diffusion_panorama_euler(self):
208
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
209
- components = self.get_dummy_components()
210
- components["scheduler"] = EulerAncestralDiscreteScheduler(
211
- beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
212
- )
213
- sd_pipe = StableDiffusionPanoramaPipeline(**components)
214
- sd_pipe = sd_pipe.to(device)
215
- sd_pipe.set_progress_bar_config(disable=None)
216
-
217
- inputs = self.get_dummy_inputs(device)
218
- image = sd_pipe(**inputs).images
219
- image_slice = image[0, -3:, -3:, -1]
220
-
221
- assert image.shape == (1, 64, 64, 3)
222
-
223
- expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
224
-
225
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
226
-
227
- def test_stable_diffusion_panorama_pndm(self):
228
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
229
- components = self.get_dummy_components()
230
- components["scheduler"] = PNDMScheduler(
231
- beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
232
- )
233
- sd_pipe = StableDiffusionPanoramaPipeline(**components)
234
- sd_pipe = sd_pipe.to(device)
235
- sd_pipe.set_progress_bar_config(disable=None)
236
-
237
- inputs = self.get_dummy_inputs(device)
238
- image = sd_pipe(**inputs).images
239
- image_slice = image[0, -3:, -3:, -1]
240
-
241
- assert image.shape == (1, 64, 64, 3)
242
-
243
- expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
244
-
245
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
246
-
247
-
248
- @slow
249
- @require_torch_gpu
250
- class StableDiffusionPanoramaSlowTests(unittest.TestCase):
251
- def tearDown(self):
252
- super().tearDown()
253
- gc.collect()
254
- torch.cuda.empty_cache()
255
-
256
- def get_inputs(self, seed=0):
257
- generator = torch.manual_seed(seed)
258
- inputs = {
259
- "prompt": "a photo of the dolomites",
260
- "generator": generator,
261
- "num_inference_steps": 3,
262
- "guidance_scale": 7.5,
263
- "output_type": "numpy",
264
- }
265
- return inputs
266
-
267
- def test_stable_diffusion_panorama_default(self):
268
- model_ckpt = "stabilityai/stable-diffusion-2-base"
269
- scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
270
- pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
271
- pipe.to(torch_device)
272
- pipe.set_progress_bar_config(disable=None)
273
- pipe.enable_attention_slicing()
274
-
275
- inputs = self.get_inputs()
276
- image = pipe(**inputs).images
277
- image_slice = image[0, -3:, -3:, -1].flatten()
278
-
279
- assert image.shape == (1, 512, 2048, 3)
280
-
281
- expected_slice = np.array(
282
- [
283
- 0.36968392,
284
- 0.27025372,
285
- 0.32446766,
286
- 0.28379387,
287
- 0.36363274,
288
- 0.30733347,
289
- 0.27100027,
290
- 0.27054125,
291
- 0.25536096,
292
- ]
293
- )
294
-
295
- assert np.abs(expected_slice - image_slice).max() < 1e-2
296
-
297
- def test_stable_diffusion_panorama_k_lms(self):
298
- pipe = StableDiffusionPanoramaPipeline.from_pretrained(
299
- "stabilityai/stable-diffusion-2-base", safety_checker=None
300
- )
301
- pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
302
- pipe.to(torch_device)
303
- pipe.set_progress_bar_config(disable=None)
304
- pipe.enable_attention_slicing()
305
-
306
- inputs = self.get_inputs()
307
- image = pipe(**inputs).images
308
- image_slice = image[0, -3:, -3:, -1].flatten()
309
-
310
- assert image.shape == (1, 512, 2048, 3)
311
-
312
- expected_slice = np.array(
313
- [
314
- [
315
- 0.0,
316
- 0.0,
317
- 0.0,
318
- 0.0,
319
- 0.0,
320
- 0.0,
321
- 0.0,
322
- 0.0,
323
- 0.0,
324
- ]
325
- ]
326
- )
327
-
328
- assert np.abs(expected_slice - image_slice).max() < 1e-3
329
-
330
- def test_stable_diffusion_panorama_intermediate_state(self):
331
- number_of_steps = 0
332
-
333
- def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
334
- callback_fn.has_been_called = True
335
- nonlocal number_of_steps
336
- number_of_steps += 1
337
- if step == 1:
338
- latents = latents.detach().cpu().numpy()
339
- assert latents.shape == (1, 4, 64, 256)
340
- latents_slice = latents[0, -3:, -3:, -1]
341
-
342
- expected_slice = np.array(
343
- [
344
- 0.18681869,
345
- 0.33907816,
346
- 0.5361276,
347
- 0.14432865,
348
- -0.02856611,
349
- -0.73941123,
350
- 0.23397987,
351
- 0.47322682,
352
- -0.37823164,
353
- ]
354
- )
355
- assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
356
- elif step == 2:
357
- latents = latents.detach().cpu().numpy()
358
- assert latents.shape == (1, 4, 64, 256)
359
- latents_slice = latents[0, -3:, -3:, -1]
360
-
361
- expected_slice = np.array(
362
- [
363
- 0.18539645,
364
- 0.33987248,
365
- 0.5378559,
366
- 0.14437142,
367
- -0.02455261,
368
- -0.7338317,
369
- 0.23990755,
370
- 0.47356272,
371
- -0.3786505,
372
- ]
373
- )
374
-
375
- assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
376
-
377
- callback_fn.has_been_called = False
378
-
379
- model_ckpt = "stabilityai/stable-diffusion-2-base"
380
- scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
381
- pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
382
- pipe = pipe.to(torch_device)
383
- pipe.set_progress_bar_config(disable=None)
384
- pipe.enable_attention_slicing()
385
-
386
- inputs = self.get_inputs()
387
- pipe(**inputs, callback=callback_fn, callback_steps=1)
388
- assert callback_fn.has_been_called
389
- assert number_of_steps == 3
390
-
391
- def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
392
- torch.cuda.empty_cache()
393
- torch.cuda.reset_max_memory_allocated()
394
- torch.cuda.reset_peak_memory_stats()
395
-
396
- model_ckpt = "stabilityai/stable-diffusion-2-base"
397
- scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
398
- pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
399
- pipe = pipe.to(torch_device)
400
- pipe.set_progress_bar_config(disable=None)
401
- pipe.enable_attention_slicing(1)
402
- pipe.enable_sequential_cpu_offload()
403
-
404
- inputs = self.get_inputs()
405
- _ = pipe(**inputs)
406
-
407
- mem_bytes = torch.cuda.max_memory_allocated()
408
- # make sure that less than 5.5 GB is allocated
409
- assert mem_bytes < 5.5 * 10**9
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipeline_utils.py DELETED
@@ -1,134 +0,0 @@
1
- import unittest
2
-
3
- from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
4
-
5
-
6
- class IsSafetensorsCompatibleTests(unittest.TestCase):
7
- def test_all_is_compatible(self):
8
- filenames = [
9
- "safety_checker/pytorch_model.bin",
10
- "safety_checker/model.safetensors",
11
- "vae/diffusion_pytorch_model.bin",
12
- "vae/diffusion_pytorch_model.safetensors",
13
- "text_encoder/pytorch_model.bin",
14
- "text_encoder/model.safetensors",
15
- "unet/diffusion_pytorch_model.bin",
16
- "unet/diffusion_pytorch_model.safetensors",
17
- ]
18
- self.assertTrue(is_safetensors_compatible(filenames))
19
-
20
- def test_diffusers_model_is_compatible(self):
21
- filenames = [
22
- "unet/diffusion_pytorch_model.bin",
23
- "unet/diffusion_pytorch_model.safetensors",
24
- ]
25
- self.assertTrue(is_safetensors_compatible(filenames))
26
-
27
- def test_diffusers_model_is_not_compatible(self):
28
- filenames = [
29
- "safety_checker/pytorch_model.bin",
30
- "safety_checker/model.safetensors",
31
- "vae/diffusion_pytorch_model.bin",
32
- "vae/diffusion_pytorch_model.safetensors",
33
- "text_encoder/pytorch_model.bin",
34
- "text_encoder/model.safetensors",
35
- "unet/diffusion_pytorch_model.bin",
36
- # Removed: 'unet/diffusion_pytorch_model.safetensors',
37
- ]
38
- self.assertFalse(is_safetensors_compatible(filenames))
39
-
40
- def test_transformer_model_is_compatible(self):
41
- filenames = [
42
- "text_encoder/pytorch_model.bin",
43
- "text_encoder/model.safetensors",
44
- ]
45
- self.assertTrue(is_safetensors_compatible(filenames))
46
-
47
- def test_transformer_model_is_not_compatible(self):
48
- filenames = [
49
- "safety_checker/pytorch_model.bin",
50
- "safety_checker/model.safetensors",
51
- "vae/diffusion_pytorch_model.bin",
52
- "vae/diffusion_pytorch_model.safetensors",
53
- "text_encoder/pytorch_model.bin",
54
- # Removed: 'text_encoder/model.safetensors',
55
- "unet/diffusion_pytorch_model.bin",
56
- "unet/diffusion_pytorch_model.safetensors",
57
- ]
58
- self.assertFalse(is_safetensors_compatible(filenames))
59
-
60
- def test_all_is_compatible_variant(self):
61
- filenames = [
62
- "safety_checker/pytorch_model.fp16.bin",
63
- "safety_checker/model.fp16.safetensors",
64
- "vae/diffusion_pytorch_model.fp16.bin",
65
- "vae/diffusion_pytorch_model.fp16.safetensors",
66
- "text_encoder/pytorch_model.fp16.bin",
67
- "text_encoder/model.fp16.safetensors",
68
- "unet/diffusion_pytorch_model.fp16.bin",
69
- "unet/diffusion_pytorch_model.fp16.safetensors",
70
- ]
71
- variant = "fp16"
72
- self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
73
-
74
- def test_diffusers_model_is_compatible_variant(self):
75
- filenames = [
76
- "unet/diffusion_pytorch_model.fp16.bin",
77
- "unet/diffusion_pytorch_model.fp16.safetensors",
78
- ]
79
- variant = "fp16"
80
- self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
81
-
82
- def test_diffusers_model_is_compatible_variant_partial(self):
83
- # pass variant but use the non-variant filenames
84
- filenames = [
85
- "unet/diffusion_pytorch_model.bin",
86
- "unet/diffusion_pytorch_model.safetensors",
87
- ]
88
- variant = "fp16"
89
- self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
90
-
91
- def test_diffusers_model_is_not_compatible_variant(self):
92
- filenames = [
93
- "safety_checker/pytorch_model.fp16.bin",
94
- "safety_checker/model.fp16.safetensors",
95
- "vae/diffusion_pytorch_model.fp16.bin",
96
- "vae/diffusion_pytorch_model.fp16.safetensors",
97
- "text_encoder/pytorch_model.fp16.bin",
98
- "text_encoder/model.fp16.safetensors",
99
- "unet/diffusion_pytorch_model.fp16.bin",
100
- # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
101
- ]
102
- variant = "fp16"
103
- self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
104
-
105
- def test_transformer_model_is_compatible_variant(self):
106
- filenames = [
107
- "text_encoder/pytorch_model.fp16.bin",
108
- "text_encoder/model.fp16.safetensors",
109
- ]
110
- variant = "fp16"
111
- self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
112
-
113
- def test_transformer_model_is_compatible_variant_partial(self):
114
- # pass variant but use the non-variant filenames
115
- filenames = [
116
- "text_encoder/pytorch_model.bin",
117
- "text_encoder/model.safetensors",
118
- ]
119
- variant = "fp16"
120
- self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
121
-
122
- def test_transformer_model_is_not_compatible_variant(self):
123
- filenames = [
124
- "safety_checker/pytorch_model.fp16.bin",
125
- "safety_checker/model.fp16.safetensors",
126
- "vae/diffusion_pytorch_model.fp16.bin",
127
- "vae/diffusion_pytorch_model.fp16.safetensors",
128
- "text_encoder/pytorch_model.fp16.bin",
129
- # 'text_encoder/model.fp16.safetensors',
130
- "unet/diffusion_pytorch_model.fp16.bin",
131
- "unet/diffusion_pytorch_model.fp16.safetensors",
132
- ]
133
- variant = "fp16"
134
- self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
 
spaces/Andy1621/uniformer_image_detection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py DELETED
@@ -1,37 +0,0 @@
- _base_ = [
-     '../_base_/models/faster_rcnn_r50_fpn.py',
-     '../_base_/datasets/coco_detection.py',
-     '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
- ]
-
- model = dict(
-     type='FasterRCNN',
-     pretrained='torchvision://resnet50',
-     rpn_head=dict(
-         type='RPNHead',
-         anchor_generator=dict(
-             type='LegacyAnchorGenerator',
-             center_offset=0.5,
-             scales=[8],
-             ratios=[0.5, 1.0, 2.0],
-             strides=[4, 8, 16, 32, 64]),
-         bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
-         loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
-     roi_head=dict(
-         type='StandardRoIHead',
-         bbox_roi_extractor=dict(
-             type='SingleRoIExtractor',
-             roi_layer=dict(
-                 type='RoIAlign',
-                 output_size=7,
-                 sampling_ratio=2,
-                 aligned=False),
-             out_channels=256,
-             featmap_strides=[4, 8, 16, 32]),
-         bbox_head=dict(
-             bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
-             loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
-     # model training and testing settings
-     train_cfg=dict(
-         rpn_proposal=dict(max_per_img=2000),
-         rcnn=dict(assigner=dict(match_low_quality=True))))
 
spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py DELETED
@@ -1,30 +0,0 @@
- _base_ = '../faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py'
-
- model = dict(
-     roi_head=dict(
-         type='PISARoIHead',
-         bbox_head=dict(
-             loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
-     train_cfg=dict(
-         rpn_proposal=dict(
-             nms_pre=2000,
-             max_per_img=2000,
-             nms=dict(type='nms', iou_threshold=0.7),
-             min_bbox_size=0),
-         rcnn=dict(
-             sampler=dict(
-                 type='ScoreHLRSampler',
-                 num=512,
-                 pos_fraction=0.25,
-                 neg_pos_ub=-1,
-                 add_gt_as_proposals=True,
-                 k=0.5,
-                 bias=0.),
-             isr=dict(k=2, bias=0),
-             carl=dict(k=1, bias=0.2))),
-     test_cfg=dict(
-         rpn=dict(
-             nms_pre=2000,
-             max_per_img=2000,
-             nms=dict(type='nms', iou_threshold=0.7),
-             min_bbox_size=0)))
 
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/psanet_r50-d8.py DELETED
@@ -1,49 +0,0 @@
- # model settings
- norm_cfg = dict(type='SyncBN', requires_grad=True)
- model = dict(
-     type='EncoderDecoder',
-     pretrained='open-mmlab://resnet50_v1c',
-     backbone=dict(
-         type='ResNetV1c',
-         depth=50,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         dilations=(1, 1, 2, 4),
-         strides=(1, 2, 1, 1),
-         norm_cfg=norm_cfg,
-         norm_eval=False,
-         style='pytorch',
-         contract_dilation=True),
-     decode_head=dict(
-         type='PSAHead',
-         in_channels=2048,
-         in_index=3,
-         channels=512,
-         mask_size=(97, 97),
-         psa_type='bi-direction',
-         compact=False,
-         shrink_factor=2,
-         normalization_factor=1.0,
-         psa_softmax=True,
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
-     auxiliary_head=dict(
-         type='FCNHead',
-         in_channels=1024,
-         in_index=2,
-         channels=256,
-         num_convs=1,
-         concat_input=False,
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-     # model training and testing settings
-     train_cfg=dict(),
-     test_cfg=dict(mode='whole'))
 
spaces/Apex-X/ROOPOK/roop/__init__.py DELETED
File without changes
spaces/Apex-X/Tm/roop/processors/frame/face_enhancer.py DELETED
@@ -1,81 +0,0 @@
- from typing import Any, List, Callable
- import cv2
- import threading
- import gfpgan
-
- import roop.globals
- import roop.processors.frame.core
- from roop.core import update_status
- from roop.face_analyser import get_one_face
- from roop.typing import Frame, Face
- from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
-
- FACE_ENHANCER = None
- THREAD_SEMAPHORE = threading.Semaphore()
- THREAD_LOCK = threading.Lock()
- NAME = 'ROOP.FACE-ENHANCER'
-
-
- def get_face_enhancer() -> Any:
-     global FACE_ENHANCER
-
-     with THREAD_LOCK:
-         if FACE_ENHANCER is None:
-             model_path = resolve_relative_path('../models/GFPGANv1.4.pth')
-             # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399
-             FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1)  # type: ignore[attr-defined]
-     return FACE_ENHANCER
-
-
- def pre_check() -> bool:
-     download_directory_path = resolve_relative_path('../models')
-     conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/GFPGANv1.4.pth'])
-     return True
-
-
- def pre_start() -> bool:
-     if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
-         update_status('Select an image or video for target path.', NAME)
-         return False
-     return True
-
-
- def post_process() -> None:
-     global FACE_ENHANCER
-
-     FACE_ENHANCER = None
-
-
- def enhance_face(temp_frame: Frame) -> Frame:
-     with THREAD_SEMAPHORE:
-         _, _, temp_frame = get_face_enhancer().enhance(
-             temp_frame,
-             paste_back=True
-         )
-     return temp_frame
-
-
- def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
-     target_face = get_one_face(temp_frame)
-     if target_face:
-         temp_frame = enhance_face(temp_frame)
-     return temp_frame
-
-
- def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
-     for temp_frame_path in temp_frame_paths:
-         temp_frame = cv2.imread(temp_frame_path)
-         result = process_frame(None, temp_frame)
-         cv2.imwrite(temp_frame_path, result)
-         if update:
-             update()
-
-
- def process_image(source_path: str, target_path: str, output_path: str) -> None:
-     target_frame = cv2.imread(target_path)
-     result = process_frame(None, target_frame)
-     cv2.imwrite(output_path, result)
-
-
- def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
-     roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
 
spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/app_utils.py DELETED
@@ -1,131 +0,0 @@
1
- import os
2
- import requests
3
- import random
4
- import _thread as thread
5
- from uuid import uuid4
6
- import urllib
7
-
8
- import numpy as np
9
- import skimage
10
- from skimage.filters import gaussian
11
- from PIL import Image
12
-
13
- def compress_image(image, path_original):
14
- size = 1920, 1080
15
- width = 1920
16
- height = 1080
17
-
18
- name = os.path.basename(path_original).split('.')
19
- first_name = os.path.join(os.path.dirname(path_original), name[0] + '.jpg')
20
-
21
- if image.size[0] > width and image.size[1] > height:
22
- image.thumbnail(size, Image.ANTIALIAS)
23
- image.save(first_name, quality=85)
24
- elif image.size[0] > width:
25
- wpercent = (width/float(image.size[0]))
26
- height = int((float(image.size[1])*float(wpercent)))
27
- image = image.resize((width,height), Image.ANTIALIAS)
28
- image.save(first_name,quality=85)
29
- elif image.size[1] > height:
30
- wpercent = (height/float(image.size[1]))
31
- width = int((float(image.size[0])*float(wpercent)))
32
- image = image.resize((width,height), Image.ANTIALIAS)
33
- image.save(first_name, quality=85)
34
- else:
35
- image.save(first_name, quality=85)
36
-
37
-
38
- def convertToJPG(path_original):
39
- img = Image.open(path_original)
40
- name = os.path.basename(path_original).split('.')
41
- first_name = os.path.join(os.path.dirname(path_original), name[0] + '.jpg')
42
-
43
- if img.format == "JPEG":
44
- image = img.convert('RGB')
45
- compress_image(image, path_original)
46
- img.close()
47
-
48
- elif img.format == "GIF":
49
- i = img.convert("RGBA")
50
- bg = Image.new("RGBA", i.size)
51
- image = Image.composite(i, bg, i)
52
- compress_image(image, path_original)
53
- img.close()
54
-
55
- elif img.format == "PNG":
56
- try:
57
- image = Image.new("RGB", img.size, (255,255,255))
58
- image.paste(img,img)
59
- compress_image(image, path_original)
60
- except ValueError:
61
- image = img.convert('RGB')
62
- compress_image(image, path_original)
63
-
64
- img.close()
65
-
66
- elif img.format == "BMP":
67
- image = img.convert('RGB')
68
- compress_image(image, path_original)
69
- img.close()
70
-
71
-
72
-
73
- def blur(image, x0, x1, y0, y1, sigma=1, multichannel=True):
74
- y0, y1 = min(y0, y1), max(y0, y1)
75
- x0, x1 = min(x0, x1), max(x0, x1)
76
- im = image.copy()
77
- sub_im = im[y0:y1,x0:x1].copy()
78
- blur_sub_im = gaussian(sub_im, sigma=sigma, multichannel=multichannel)
79
- blur_sub_im = np.round(255 * blur_sub_im)
80
- im[y0:y1,x0:x1] = blur_sub_im
81
- return im
82
-
83
-
84
-
85
- def download(url, filename):
86
- data = requests.get(url).content
87
- with open(filename, 'wb') as handler:
88
- handler.write(data)
89
-
90
- return filename
91
-
92
-
93
- def generate_random_filename(upload_directory, extension):
94
- filename = str(uuid4())
95
- filename = os.path.join(upload_directory, filename + "." + extension)
96
- return filename
97
-
98
-
99
- def clean_me(filename):
100
- if os.path.exists(filename):
101
- os.remove(filename)
102
-
103
-
104
- def clean_all(files):
105
- for me in files:
106
- clean_me(me)
107
-
108
-
109
- def create_directory(path):
110
- os.makedirs(os.path.dirname(path), exist_ok=True)
111
-
112
-
113
- def get_model_bin(url, output_path):
114
- # print('Getting model dir: ', output_path)
115
- if not os.path.exists(output_path):
116
- create_directory(output_path)
117
-
118
- urllib.request.urlretrieve(url, output_path)
119
-
120
- # cmd = "wget -O %s %s" % (output_path, url)
121
- # print(cmd)
122
- # os.system(cmd)
123
-
124
- return output_path
125
-
126
-
127
- #model_list = [(url, output_path), (url, output_path)]
128
- def get_multi_model_bin(model_list):
129
- for m in model_list:
130
- thread.start_new_thread(get_model_bin, m)
131
-
 
spaces/Arnx/MusicGenXvAKN/audiocraft/data/audio.py DELETED
@@ -1,215 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- """
8
- Audio IO methods are defined in this module (info, read, write),
9
- We rely on av library for faster read when possible, otherwise on torchaudio.
10
- """
11
-
12
- from dataclasses import dataclass
13
- from pathlib import Path
14
- import logging
15
- import typing as tp
16
-
17
- import numpy as np
18
- import soundfile
19
- import torch
20
- from torch.nn import functional as F
21
- import torchaudio as ta
22
-
23
- import av
24
-
25
- from .audio_utils import f32_pcm, i16_pcm, normalize_audio
26
-
27
-
28
- _av_initialized = False
29
-
30
-
31
- def _init_av():
32
- global _av_initialized
33
- if _av_initialized:
34
- return
35
- logger = logging.getLogger('libav.mp3')
36
- logger.setLevel(logging.ERROR)
37
- _av_initialized = True
38
-
39
-
40
- @dataclass(frozen=True)
41
- class AudioFileInfo:
42
- sample_rate: int
43
- duration: float
44
- channels: int
45
-
46
-
47
- def _av_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
48
- _init_av()
49
- with av.open(str(filepath)) as af:
50
- stream = af.streams.audio[0]
51
- sample_rate = stream.codec_context.sample_rate
52
- duration = float(stream.duration * stream.time_base)
53
- channels = stream.channels
54
- return AudioFileInfo(sample_rate, duration, channels)
55
-
56
-
57
- def _soundfile_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
58
- info = soundfile.info(filepath)
59
- return AudioFileInfo(info.samplerate, info.duration, info.channels)
60
-
61
-
62
- def audio_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
63
- # torchaudio no longer returns useful duration informations for some formats like mp3s.
64
- filepath = Path(filepath)
65
- if filepath.suffix in ['.flac', '.ogg']: # TODO: Validate .ogg can be safely read with av_info
66
- # ffmpeg has some weird issue with flac.
67
- return _soundfile_info(filepath)
68
- else:
69
- return _av_info(filepath)
70
-
71
-
72
- def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) -> tp.Tuple[torch.Tensor, int]:
73
- """FFMPEG-based audio file reading using PyAV bindings.
74
- Soundfile cannot read mp3 and av_read is more efficient than torchaudio.
75
-
76
- Args:
77
- filepath (str or Path): Path to audio file to read.
78
- seek_time (float): Time at which to start reading in the file.
79
- duration (float): Duration to read from the file. If set to -1, the whole file is read.
80
- Returns:
81
- Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate
82
- """
83
- _init_av()
84
- with av.open(str(filepath)) as af:
85
- stream = af.streams.audio[0]
86
- sr = stream.codec_context.sample_rate
87
- num_frames = int(sr * duration) if duration >= 0 else -1
88
- frame_offset = int(sr * seek_time)
89
- # we need a small negative offset otherwise we get some edge artifact
90
- # from the mp3 decoder.
91
- af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream)
92
- frames = []
93
- length = 0
94
- for frame in af.decode(streams=stream.index):
95
- current_offset = int(frame.rate * frame.pts * frame.time_base)
96
- strip = max(0, frame_offset - current_offset)
97
- buf = torch.from_numpy(frame.to_ndarray())
98
- if buf.shape[0] != stream.channels:
99
- buf = buf.view(-1, stream.channels).t()
100
- buf = buf[:, strip:]
101
- frames.append(buf)
102
- length += buf.shape[1]
103
- if num_frames > 0 and length >= num_frames:
104
- break
105
- assert frames
106
- # If the above assert fails, it is likely because we seeked past the end of file point,
107
- # in which case ffmpeg returns a single frame with only zeros, and a weird timestamp.
108
- # This will need proper debugging, in due time.
109
- wav = torch.cat(frames, dim=1)
110
- assert wav.shape[0] == stream.channels
111
- if num_frames > 0:
112
- wav = wav[:, :num_frames]
113
- return f32_pcm(wav), sr
114
-
115
-
116
- def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,
117
- duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:
118
- """Read audio by picking the most appropriate backend tool based on the audio format.
119
-
120
- Args:
121
- filepath (str or Path): Path to audio file to read.
122
- seek_time (float): Time at which to start reading in the file.
123
- duration (float): Duration to read from the file. If set to -1, the whole file is read.
124
- pad (bool): Pad output audio if not reaching expected duration.
125
- Returns:
126
- Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate.
127
- """
128
- fp = Path(filepath)
129
- if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg
130
- # There is some bug with ffmpeg and reading flac
131
- info = _soundfile_info(filepath)
132
- frames = -1 if duration <= 0 else int(duration * info.sample_rate)
133
- frame_offset = int(seek_time * info.sample_rate)
134
- wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)
135
- assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}"
136
- wav = torch.from_numpy(wav).t().contiguous()
137
- if len(wav.shape) == 1:
138
- wav = torch.unsqueeze(wav, 0)
139
- elif (
140
- fp.suffix in ['.wav', '.mp3'] and fp.suffix[1:] in ta.utils.sox_utils.list_read_formats()
141
- and duration <= 0 and seek_time == 0
142
- ):
143
- # Torchaudio is faster if we load an entire file at once.
144
- wav, sr = ta.load(fp)
145
- else:
146
- wav, sr = _av_read(filepath, seek_time, duration)
147
- if pad and duration > 0:
148
- expected_frames = int(duration * sr)
149
- wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))
150
- return wav, sr
151
-
152
-
153
- def audio_write(stem_name: tp.Union[str, Path],
154
- wav: torch.Tensor, sample_rate: int,
155
- format: str = 'wav', mp3_rate: int = 320, normalize: bool = True,
156
- strategy: str = 'peak', peak_clip_headroom_db: float = 1,
157
- rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
158
- loudness_compressor: bool = False,
159
- log_clipping: bool = True, make_parent_dir: bool = True,
160
- add_suffix: bool = True) -> Path:
161
- """Convenience function for saving audio to disk. Returns the filename the audio was written to.
162
-
163
- Args:
164
- stem_name (str or Path): Filename without extension which will be added automatically.
165
- format (str): Either "wav" or "mp3".
166
- mp3_rate (int): kbps when using mp3s.
167
- normalize (bool): if `True` (default), normalizes according to the prescribed
168
- strategy (see after). If `False`, the strategy is only used in case clipping
169
- would happen.
170
- strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
171
- i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
172
- with extra headroom to avoid clipping. 'clip' just clips.
173
- peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
174
- rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
175
- than the `peak_clip` one to avoid further clipping.
176
- loudness_headroom_db (float): Target loudness for loudness normalization.
177
- loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.
178
- when strategy is 'loudness'log_clipping (bool): If True, basic logging on stderr when clipping still
179
- occurs despite strategy (only for 'rms').
180
- make_parent_dir (bool): Make parent directory if it doesn't exist.
181
- Returns:
182
- Path: Path of the saved audio.
183
- """
184
- assert wav.dtype.is_floating_point, "wav is not floating point"
185
- if wav.dim() == 1:
186
- wav = wav[None]
187
- elif wav.dim() > 2:
188
- raise ValueError("Input wav should be at most 2 dimension.")
189
- assert wav.isfinite().all()
190
- wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,
191
- rms_headroom_db, loudness_headroom_db, log_clipping=log_clipping,
192
- sample_rate=sample_rate, stem_name=str(stem_name))
193
- kwargs: dict = {}
194
- if format == 'mp3':
195
- suffix = '.mp3'
196
- kwargs.update({"compression": mp3_rate})
197
- elif format == 'wav':
198
- wav = i16_pcm(wav)
199
- suffix = '.wav'
200
- kwargs.update({"encoding": "PCM_S", "bits_per_sample": 16})
201
- else:
202
- raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.")
203
- if not add_suffix:
204
- suffix = ''
205
- path = Path(str(stem_name) + suffix)
206
- if make_parent_dir:
207
- path.parent.mkdir(exist_ok=True, parents=True)
208
- try:
209
- ta.save(path, wav, sample_rate, **kwargs)
210
- except Exception:
211
- if path.exists():
212
- # we do not want to leave half written files around.
213
- path.unlink()
214
- raise
215
- return path
 
spaces/Audio-AGI/AudioSep/models/base.py DELETED
@@ -1,152 +0,0 @@
1
- import torch.nn as nn
2
- import torch
3
- import numpy as np
4
- import torch.nn.functional as F
5
- import math
6
- from torchlibrosa.stft import magphase
7
-
8
-
9
- def init_layer(layer):
10
- """Initialize a Linear or Convolutional layer. """
11
- nn.init.xavier_uniform_(layer.weight)
12
-
13
- if hasattr(layer, "bias"):
14
- if layer.bias is not None:
15
- layer.bias.data.fill_(0.0)
16
-
17
-
18
- def init_bn(bn):
19
- """Initialize a Batchnorm layer. """
20
- bn.bias.data.fill_(0.0)
21
- bn.weight.data.fill_(1.0)
22
-
23
-
24
- def init_embedding(layer):
25
- """Initialize a Linear or Convolutional layer. """
26
- nn.init.uniform_(layer.weight, -1., 1.)
27
-
28
- if hasattr(layer, 'bias'):
29
- if layer.bias is not None:
30
- layer.bias.data.fill_(0.)
31
-
32
-
33
- def init_gru(rnn):
34
- """Initialize a GRU layer. """
35
-
36
- def _concat_init(tensor, init_funcs):
37
- (length, fan_out) = tensor.shape
38
- fan_in = length // len(init_funcs)
39
-
40
- for (i, init_func) in enumerate(init_funcs):
41
- init_func(tensor[i * fan_in : (i + 1) * fan_in, :])
42
-
43
- def _inner_uniform(tensor):
44
- fan_in = nn.init._calculate_correct_fan(tensor, "fan_in")
45
- nn.init.uniform_(tensor, -math.sqrt(3 / fan_in), math.sqrt(3 / fan_in))
46
-
47
- for i in range(rnn.num_layers):
48
- _concat_init(
49
- getattr(rnn, "weight_ih_l{}".format(i)),
50
- [_inner_uniform, _inner_uniform, _inner_uniform],
51
- )
52
- torch.nn.init.constant_(getattr(rnn, "bias_ih_l{}".format(i)), 0)
53
-
54
- _concat_init(
55
- getattr(rnn, "weight_hh_l{}".format(i)),
56
- [_inner_uniform, _inner_uniform, nn.init.orthogonal_],
57
- )
58
- torch.nn.init.constant_(getattr(rnn, "bias_hh_l{}".format(i)), 0)
59
-
60
-
61
- def act(x, activation):
62
- if activation == "relu":
63
- return F.relu_(x)
64
-
65
- elif activation == "leaky_relu":
66
- return F.leaky_relu_(x, negative_slope=0.01)
67
-
68
- elif activation == "swish":
69
- return x * torch.sigmoid(x)
70
-
71
- else:
72
- raise Exception("Incorrect activation!")
73
-
74
-
75
- class Base:
76
- def __init__(self):
77
- pass
78
-
79
- def spectrogram(self, input, eps=0.):
80
- (real, imag) = self.stft(input)
81
- return torch.clamp(real ** 2 + imag ** 2, eps, np.inf) ** 0.5
82
-
83
- def spectrogram_phase(self, input, eps=0.):
84
- (real, imag) = self.stft(input)
85
- mag = torch.clamp(real ** 2 + imag ** 2, eps, np.inf) ** 0.5
86
- cos = real / mag
87
- sin = imag / mag
88
- return mag, cos, sin
89
-
90
-
91
- def wav_to_spectrogram_phase(self, input, eps=1e-10):
92
- """Waveform to spectrogram.
93
-
94
- Args:
95
- input: (batch_size, segment_samples, channels_num)
96
-
97
- Outputs:
98
- output: (batch_size, channels_num, time_steps, freq_bins)
99
- """
100
- sp_list = []
101
- cos_list = []
102
- sin_list = []
103
- channels_num = input.shape[1]
104
- for channel in range(channels_num):
105
- mag, cos, sin = self.spectrogram_phase(input[:, channel, :], eps=eps)
106
- sp_list.append(mag)
107
- cos_list.append(cos)
108
- sin_list.append(sin)
109
-
110
- sps = torch.cat(sp_list, dim=1)
111
- coss = torch.cat(cos_list, dim=1)
112
- sins = torch.cat(sin_list, dim=1)
113
- return sps, coss, sins
114
-
115
- def wav_to_spectrogram(self, input, eps=0.):
116
- """Waveform to spectrogram.
117
-
118
- Args:
119
- input: (batch_size, segment_samples, channels_num)
120
-
121
- Outputs:
122
- output: (batch_size, channels_num, time_steps, freq_bins)
123
- """
124
- sp_list = []
125
- channels_num = input.shape[1]
126
- for channel in range(channels_num):
127
- sp_list.append(self.spectrogram(input[:, channel, :], eps=eps))
128
-
129
- output = torch.cat(sp_list, dim=1)
130
- return output
131
-
132
-
133
- def spectrogram_to_wav(self, input, spectrogram, length=None):
134
- """Spectrogram to waveform.
135
-
136
- Args:
137
- input: (batch_size, segment_samples, channels_num)
138
- spectrogram: (batch_size, channels_num, time_steps, freq_bins)
139
-
140
- Outputs:
141
- output: (batch_size, segment_samples, channels_num)
142
- """
143
- channels_num = input.shape[1]
144
- wav_list = []
145
- for channel in range(channels_num):
146
- (real, imag) = self.stft(input[:, channel, :])
147
- (_, cos, sin) = magphase(real, imag)
148
- wav_list.append(self.istft(spectrogram[:, channel : channel + 1, :, :] * cos,
149
- spectrogram[:, channel : channel + 1, :, :] * sin, length))
150
-
151
- output = torch.stack(wav_list, dim=1)
152
- return output
 
spaces/AvaterClasher/Food_Classifier_Refined_MONI/model.py DELETED
@@ -1,24 +0,0 @@
- import torch
- import torchvision
-
- from torch import nn
-
- def create_effnetb2_model(num_classes:int=3, # default output classes = 3 (pizza, steak, sushi)
-                           seed:int=42):
-     # 1, 2, 3 Create EffNetB2 pretrained weights, transforms and model
-     weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
-     transforms = weights.transforms()
-     model = torchvision.models.efficientnet_b2(weights=weights)
-
-     # 4. Freeze all layers in the base model
-     for param in model.parameters():
-         param.requires_grad = False
-
-     # 5. Change classifier head with random seed for reproducibility
-     torch.manual_seed(seed)
-     model.classifier = nn.Sequential(
-         nn.Dropout(p=0.3, inplace=True),
-         nn.Linear(in_features=1408, out_features=num_classes)
-     )
-
-     return model, transforms
 
spaces/Awesimo/jojogan/e4e/models/encoders/model_irse.py DELETED
@@ -1,84 +0,0 @@
1
- from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
2
- from e4e.models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
3
-
4
- """
5
- Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
6
- """
7
-
8
-
9
- class Backbone(Module):
10
- def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
11
- super(Backbone, self).__init__()
12
- assert input_size in [112, 224], "input_size should be 112 or 224"
13
- assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
14
- assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
15
- blocks = get_blocks(num_layers)
16
- if mode == 'ir':
17
- unit_module = bottleneck_IR
18
- elif mode == 'ir_se':
19
- unit_module = bottleneck_IR_SE
20
- self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
21
- BatchNorm2d(64),
22
- PReLU(64))
23
- if input_size == 112:
24
- self.output_layer = Sequential(BatchNorm2d(512),
25
- Dropout(drop_ratio),
26
- Flatten(),
27
- Linear(512 * 7 * 7, 512),
28
- BatchNorm1d(512, affine=affine))
29
- else:
30
- self.output_layer = Sequential(BatchNorm2d(512),
31
- Dropout(drop_ratio),
32
- Flatten(),
33
- Linear(512 * 14 * 14, 512),
34
- BatchNorm1d(512, affine=affine))
35
-
36
- modules = []
37
- for block in blocks:
38
- for bottleneck in block:
39
- modules.append(unit_module(bottleneck.in_channel,
40
- bottleneck.depth,
41
- bottleneck.stride))
42
- self.body = Sequential(*modules)
43
-
44
- def forward(self, x):
45
- x = self.input_layer(x)
46
- x = self.body(x)
47
- x = self.output_layer(x)
48
- return l2_norm(x)
49
-
50
-
51
- def IR_50(input_size):
52
- """Constructs a ir-50 model."""
53
- model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
54
- return model
55
-
56
-
57
- def IR_101(input_size):
58
- """Constructs a ir-101 model."""
59
- model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
60
- return model
61
-
62
-
63
- def IR_152(input_size):
64
- """Constructs a ir-152 model."""
65
- model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
66
- return model
67
-
68
-
69
- def IR_SE_50(input_size):
70
- """Constructs a ir_se-50 model."""
71
- model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
72
- return model
73
-
74
-
75
- def IR_SE_101(input_size):
76
- """Constructs a ir_se-101 model."""
77
- model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
78
- return model
79
-
80
-
81
- def IR_SE_152(input_size):
82
- """Constructs a ir_se-152 model."""
83
- model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
84
- return model
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation_impl.py DELETED
@@ -1,614 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates.
3
- """
4
- Implement many useful :class:`Augmentation`.
5
- """
6
- import numpy as np
7
- import sys
8
- from typing import Tuple
9
- import torch
10
- from fvcore.transforms.transform import (
11
- BlendTransform,
12
- CropTransform,
13
- HFlipTransform,
14
- NoOpTransform,
15
- PadTransform,
16
- Transform,
17
- TransformList,
18
- VFlipTransform,
19
- )
20
- from PIL import Image
21
-
22
- from .augmentation import Augmentation, _transform_to_aug
23
- from .transform import ExtentTransform, ResizeTransform, RotationTransform
24
-
25
- __all__ = [
26
- "FixedSizeCrop",
27
- "RandomApply",
28
- "RandomBrightness",
29
- "RandomContrast",
30
- "RandomCrop",
31
- "RandomExtent",
32
- "RandomFlip",
33
- "RandomSaturation",
34
- "RandomLighting",
35
- "RandomRotation",
36
- "Resize",
37
- "ResizeScale",
38
- "ResizeShortestEdge",
39
- "RandomCrop_CategoryAreaConstraint",
40
- ]
41
-
42
-
43
- class RandomApply(Augmentation):
44
- """
45
- Randomly apply an augmentation with a given probability.
46
- """
47
-
48
- def __init__(self, tfm_or_aug, prob=0.5):
49
- """
50
- Args:
51
- tfm_or_aug (Transform, Augmentation): the transform or augmentation
52
- to be applied. It can either be a `Transform` or `Augmentation`
53
- instance.
54
- prob (float): probability between 0.0 and 1.0 that
55
- the wrapper transformation is applied
56
- """
57
- super().__init__()
58
- self.aug = _transform_to_aug(tfm_or_aug)
59
- assert 0.0 <= prob <= 1.0, f"Probability must be between 0.0 and 1.0 (given: {prob})"
60
- self.prob = prob
61
-
62
- def get_transform(self, *args):
63
- do = self._rand_range() < self.prob
64
- if do:
65
- return self.aug.get_transform(*args)
66
- else:
67
- return NoOpTransform()
68
-
69
- def __call__(self, aug_input):
70
- do = self._rand_range() < self.prob
71
- if do:
72
- return self.aug(aug_input)
73
- else:
74
- return NoOpTransform()
75
-
76
-
77
- class RandomFlip(Augmentation):
78
- """
79
- Flip the image horizontally or vertically with the given probability.
80
- """
81
-
82
- def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
83
- """
84
- Args:
85
- prob (float): probability of flip.
86
- horizontal (boolean): whether to apply horizontal flipping
87
- vertical (boolean): whether to apply vertical flipping
88
- """
89
- super().__init__()
90
-
91
- if horizontal and vertical:
92
- raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
93
- if not horizontal and not vertical:
94
- raise ValueError("At least one of horiz or vert has to be True!")
95
- self._init(locals())
96
-
97
- def get_transform(self, image):
98
- h, w = image.shape[:2]
99
- do = self._rand_range() < self.prob
100
- if do:
101
- if self.horizontal:
102
- return HFlipTransform(w)
103
- elif self.vertical:
104
- return VFlipTransform(h)
105
- else:
106
- return NoOpTransform()
107
-
108
-
109
- class Resize(Augmentation):
110
- """Resize image to a fixed target size"""
111
-
112
- def __init__(self, shape, interp=Image.BILINEAR):
113
- """
114
- Args:
115
- shape: (h, w) tuple or a int
116
- interp: PIL interpolation method
117
- """
118
- if isinstance(shape, int):
119
- shape = (shape, shape)
120
- shape = tuple(shape)
121
- self._init(locals())
122
-
123
- def get_transform(self, image):
124
- return ResizeTransform(
125
- image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp
126
- )
127
-
128
-
129
- class ResizeShortestEdge(Augmentation):
130
- """
131
- Resize the image while keeping the aspect ratio unchanged.
132
- It attempts to scale the shorter edge to the given `short_edge_length`,
133
- as long as the longer edge does not exceed `max_size`.
134
- If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
135
- """
136
-
137
- @torch.jit.unused
138
- def __init__(
139
- self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
140
- ):
141
- """
142
- Args:
143
- short_edge_length (list[int]): If ``sample_style=="range"``,
144
- a [min, max] interval from which to sample the shortest edge length.
145
- If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
146
- max_size (int): maximum allowed longest edge length.
147
- sample_style (str): either "range" or "choice".
148
- """
149
- super().__init__()
150
- assert sample_style in ["range", "choice"], sample_style
151
-
152
- self.is_range = sample_style == "range"
153
- if isinstance(short_edge_length, int):
154
- short_edge_length = (short_edge_length, short_edge_length)
155
- if self.is_range:
156
- assert len(short_edge_length) == 2, (
157
- "short_edge_length must be two values using 'range' sample style."
158
- f" Got {short_edge_length}!"
159
- )
160
- self._init(locals())
161
-
162
- @torch.jit.unused
163
- def get_transform(self, image):
164
- h, w = image.shape[:2]
165
- if self.is_range:
166
- size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
167
- else:
168
- size = np.random.choice(self.short_edge_length)
169
- if size == 0:
170
- return NoOpTransform()
171
-
172
- newh, neww = ResizeShortestEdge.get_output_shape(h, w, size, self.max_size)
173
- return ResizeTransform(h, w, newh, neww, self.interp)
174
-
175
- @staticmethod
176
- def get_output_shape(
177
- oldh: int, oldw: int, short_edge_length: int, max_size: int
178
- ) -> Tuple[int, int]:
179
- """
180
- Compute the output size given input size and target short edge length.
181
- """
182
- h, w = oldh, oldw
183
- size = short_edge_length * 1.0
184
- scale = size / min(h, w)
185
- if h < w:
186
- newh, neww = size, scale * w
187
- else:
188
- newh, neww = scale * h, size
189
- if max(newh, neww) > max_size:
190
- scale = max_size * 1.0 / max(newh, neww)
191
- newh = newh * scale
192
- neww = neww * scale
193
- neww = int(neww + 0.5)
194
- newh = int(newh + 0.5)
195
- return (newh, neww)
196
-
197
-
198
- class ResizeScale(Augmentation):
199
- """
200
- Takes target size as input and randomly scales the given target size between `min_scale`
201
- and `max_scale`. It then scales the input image such that it fits inside the scaled target
202
- box, keeping the aspect ratio constant.
203
- This implements the resize part of the Google's 'resize_and_crop' data augmentation:
204
- https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/input_utils.py#L127
205
- """
206
-
207
- def __init__(
208
- self,
209
- min_scale: float,
210
- max_scale: float,
211
- target_height: int,
212
- target_width: int,
213
- interp: int = Image.BILINEAR,
214
- ):
215
- """
216
- Args:
217
- min_scale: minimum image scale range.
218
- max_scale: maximum image scale range.
219
- target_height: target image height.
220
- target_width: target image width.
221
- interp: image interpolation method.
222
- """
223
- super().__init__()
224
- self._init(locals())
225
-
226
- def _get_resize(self, image: np.ndarray, scale: float) -> Transform:
227
- input_size = image.shape[:2]
228
-
229
- # Compute new target size given a scale.
230
- target_size = (self.target_height, self.target_width)
231
- target_scale_size = np.multiply(target_size, scale)
232
-
233
- # Compute actual rescaling applied to input image and output size.
234
- output_scale = np.minimum(
235
- target_scale_size[0] / input_size[0], target_scale_size[1] / input_size[1]
236
- )
237
- output_size = np.round(np.multiply(input_size, output_scale)).astype(int)
238
-
239
- return ResizeTransform(
240
- input_size[0], input_size[1], output_size[0], output_size[1], self.interp
241
- )
242
-
243
- def get_transform(self, image: np.ndarray) -> Transform:
244
- random_scale = np.random.uniform(self.min_scale, self.max_scale)
245
- return self._get_resize(image, random_scale)
246
-
247
-
248
- class RandomRotation(Augmentation):
249
- """
250
- This method returns a copy of this image, rotated the given
251
- number of degrees counter clockwise around the given center.
252
- """
253
-
254
- def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None):
255
- """
256
- Args:
257
- angle (list[float]): If ``sample_style=="range"``,
258
- a [min, max] interval from which to sample the angle (in degrees).
259
- If ``sample_style=="choice"``, a list of angles to sample from
260
- expand (bool): choose if the image should be resized to fit the whole
261
- rotated image (default), or simply cropped
262
- center (list[[float, float]]): If ``sample_style=="range"``,
263
- a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
264
- [0, 0] being the top left of the image and [1, 1] the bottom right.
265
- If ``sample_style=="choice"``, a list of centers to sample from
266
- Default: None, which means that the center of rotation is the center of the image
267
- center has no effect if expand=True because it only affects shifting
268
- """
269
- super().__init__()
270
- assert sample_style in ["range", "choice"], sample_style
271
- self.is_range = sample_style == "range"
272
- if isinstance(angle, (float, int)):
273
- angle = (angle, angle)
274
- if center is not None and isinstance(center[0], (float, int)):
275
- center = (center, center)
276
- self._init(locals())
277
-
278
- def get_transform(self, image):
279
- h, w = image.shape[:2]
280
- center = None
281
- if self.is_range:
282
- angle = np.random.uniform(self.angle[0], self.angle[1])
283
- if self.center is not None:
284
- center = (
285
- np.random.uniform(self.center[0][0], self.center[1][0]),
286
- np.random.uniform(self.center[0][1], self.center[1][1]),
287
- )
288
- else:
289
- angle = np.random.choice(self.angle)
290
- if self.center is not None:
291
- center = np.random.choice(self.center)
292
-
293
- if center is not None:
294
- center = (w * center[0], h * center[1]) # Convert to absolute coordinates
295
-
296
- if angle % 360 == 0:
297
- return NoOpTransform()
298
-
299
- return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
300
-
301
-
302
- class FixedSizeCrop(Augmentation):
303
- """
304
- If `crop_size` is smaller than the input image size, then it uses a random crop of
305
- the crop size. If `crop_size` is larger than the input image size, then it pads
306
- the right and the bottom of the image to the crop size if `pad` is True, otherwise
307
- it returns the smaller image.
308
- """
309
-
310
- def __init__(self, crop_size: Tuple[int], pad: bool = True, pad_value: float = 128.0):
311
- """
312
- Args:
313
- crop_size: target image (height, width).
314
- pad: if True, will pad images smaller than `crop_size` up to `crop_size`
315
- pad_value: the padding value.
316
- """
317
- super().__init__()
318
- self._init(locals())
319
-
320
- def _get_crop(self, image: np.ndarray) -> Transform:
321
- # Compute the image scale and scaled size.
322
- input_size = image.shape[:2]
323
- output_size = self.crop_size
324
-
325
- # Add random crop if the image is scaled up.
326
- max_offset = np.subtract(input_size, output_size)
327
- max_offset = np.maximum(max_offset, 0)
328
- offset = np.multiply(max_offset, np.random.uniform(0.0, 1.0))
329
- offset = np.round(offset).astype(int)
330
- return CropTransform(
331
- offset[1], offset[0], output_size[1], output_size[0], input_size[1], input_size[0]
332
- )
333
-
334
- def _get_pad(self, image: np.ndarray) -> Transform:
335
- # Compute the image scale and scaled size.
336
- input_size = image.shape[:2]
337
- output_size = self.crop_size
338
-
339
- # Add padding if the image is scaled down.
340
- pad_size = np.subtract(output_size, input_size)
341
- pad_size = np.maximum(pad_size, 0)
342
- original_size = np.minimum(input_size, output_size)
343
- return PadTransform(
344
- 0, 0, pad_size[1], pad_size[0], original_size[1], original_size[0], self.pad_value
345
- )
346
-
347
- def get_transform(self, image: np.ndarray) -> TransformList:
348
- transforms = [self._get_crop(image)]
349
- if self.pad:
350
- transforms.append(self._get_pad(image))
351
- return TransformList(transforms)
352
-
353
-
354
- class RandomCrop(Augmentation):
355
- """
356
- Randomly crop a rectangle region out of an image.
357
- """
358
-
359
- def __init__(self, crop_type: str, crop_size):
360
- """
361
- Args:
362
- crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range".
363
- crop_size (tuple[float, float]): two floats, explained below.
364
-
365
- - "relative": crop a (H * crop_size[0], W * crop_size[1]) region from an input image of
366
- size (H, W). crop size should be in (0, 1]
367
- - "relative_range": uniformly sample two values from [crop_size[0], 1]
368
- and [crop_size[1], 1], and use them as in "relative" crop type.
369
- - "absolute" crop a (crop_size[0], crop_size[1]) region from input image.
370
- crop_size must be smaller than the input image size.
371
- - "absolute_range", for an input of size (H, W), uniformly sample H_crop in
372
- [crop_size[0], min(H, crop_size[1])] and W_crop in [crop_size[0], min(W, crop_size[1])].
373
- Then crop a region (H_crop, W_crop).
374
- """
375
- # TODO style of relative_range and absolute_range are not consistent:
376
- # one takes (h, w) but another takes (min, max)
377
- super().__init__()
378
- assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"]
379
- self._init(locals())
380
-
381
- def get_transform(self, image):
382
- h, w = image.shape[:2]
383
- croph, cropw = self.get_crop_size((h, w))
384
- assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
385
- h0 = np.random.randint(h - croph + 1)
386
- w0 = np.random.randint(w - cropw + 1)
387
- return CropTransform(w0, h0, cropw, croph)
388
-
389
- def get_crop_size(self, image_size):
390
- """
391
- Args:
392
- image_size (tuple): height, width
393
-
394
- Returns:
395
- crop_size (tuple): height, width in absolute pixels
396
- """
397
- h, w = image_size
398
- if self.crop_type == "relative":
399
- ch, cw = self.crop_size
400
- return int(h * ch + 0.5), int(w * cw + 0.5)
401
- elif self.crop_type == "relative_range":
402
- crop_size = np.asarray(self.crop_size, dtype=np.float32)
403
- ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
404
- return int(h * ch + 0.5), int(w * cw + 0.5)
405
- elif self.crop_type == "absolute":
406
- return (min(self.crop_size[0], h), min(self.crop_size[1], w))
407
- elif self.crop_type == "absolute_range":
408
- assert self.crop_size[0] <= self.crop_size[1]
409
- ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
410
- cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
411
- return ch, cw
412
- else:
413
- raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
414
-
415
-
416
- class RandomCrop_CategoryAreaConstraint(Augmentation):
417
- """
418
- Similar to :class:`RandomCrop`, but find a cropping window such that no single category
419
- occupies a ratio of more than `single_category_max_area` in semantic segmentation ground
420
- truth, which can cause instability in training. The function attempts to find such a valid
421
- cropping window for at most 10 times.
422
- """
423
-
424
- def __init__(
425
- self,
426
- crop_type: str,
427
- crop_size,
428
- single_category_max_area: float = 1.0,
429
- ignored_category: int = None,
430
- ):
431
- """
432
- Args:
433
- crop_type, crop_size: same as in :class:`RandomCrop`
434
- single_category_max_area: the maximum allowed area ratio of a
435
- category. Set to 1.0 to disable
436
- ignored_category: allow this category in the semantic segmentation
437
- ground truth to exceed the area ratio. Usually set to the category
438
- that's ignored in training.
439
- """
440
- self.crop_aug = RandomCrop(crop_type, crop_size)
441
- self._init(locals())
442
-
443
- def get_transform(self, image, sem_seg):
444
- if self.single_category_max_area >= 1.0:
445
- return self.crop_aug.get_transform(image)
446
- else:
447
- h, w = sem_seg.shape
448
- for _ in range(10):
449
- crop_size = self.crop_aug.get_crop_size((h, w))
450
- y0 = np.random.randint(h - crop_size[0] + 1)
451
- x0 = np.random.randint(w - crop_size[1] + 1)
452
- sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]]
453
- labels, cnt = np.unique(sem_seg_temp, return_counts=True)
454
- if self.ignored_category is not None:
455
- cnt = cnt[labels != self.ignored_category]
456
- if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area:
457
- break
458
- crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0])
459
- return crop_tfm
460
-
461
-
462
- class RandomExtent(Augmentation):
463
- """
464
- Outputs an image by cropping a random "subrect" of the source image.
465
-
466
- The subrect can be parameterized to include pixels outside the source image,
467
- in which case they will be set to zeros (i.e. black). The size of the output
468
- image will vary with the size of the random subrect.
469
- """
470
-
471
- def __init__(self, scale_range, shift_range):
472
- """
473
- Args:
474
- output_size (h, w): Dimensions of output image
475
- scale_range (l, h): Range of input-to-output size scaling factor
476
- shift_range (x, y): Range of shifts of the cropped subrect. The rect
477
- is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],
478
- where (w, h) is the (width, height) of the input image. Set each
479
- component to zero to crop at the image's center.
480
- """
481
- super().__init__()
482
- self._init(locals())
483
-
484
- def get_transform(self, image):
485
- img_h, img_w = image.shape[:2]
486
-
487
- # Initialize src_rect to fit the input image.
488
- src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])
489
-
490
- # Apply a random scaling to the src_rect.
491
- src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])
492
-
493
- # Apply a random shift to the coordinates origin.
494
- src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)
495
- src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)
496
-
497
- # Map src_rect coordinates into image coordinates (center at corner).
498
- src_rect[0::2] += 0.5 * img_w
499
- src_rect[1::2] += 0.5 * img_h
500
-
501
- return ExtentTransform(
502
- src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),
503
- output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),
504
- )
505
-
506
-
507
- class RandomContrast(Augmentation):
508
- """
509
- Randomly transforms image contrast.
510
-
511
- Contrast intensity is uniformly sampled in (intensity_min, intensity_max).
512
- - intensity < 1 will reduce contrast
513
- - intensity = 1 will preserve the input image
514
- - intensity > 1 will increase contrast
515
-
516
- See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
517
- """
518
-
519
- def __init__(self, intensity_min, intensity_max):
520
- """
521
- Args:
522
- intensity_min (float): Minimum augmentation
523
- intensity_max (float): Maximum augmentation
524
- """
525
- super().__init__()
526
- self._init(locals())
527
-
528
- def get_transform(self, image):
529
- w = np.random.uniform(self.intensity_min, self.intensity_max)
530
- return BlendTransform(src_image=image.mean(), src_weight=1 - w, dst_weight=w)
531
-
532
-
533
- class RandomBrightness(Augmentation):
534
- """
535
- Randomly transforms image brightness.
536
-
537
- Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
538
- - intensity < 1 will reduce brightness
539
- - intensity = 1 will preserve the input image
540
- - intensity > 1 will increase brightness
541
-
542
- See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
543
- """
544
-
545
- def __init__(self, intensity_min, intensity_max):
546
- """
547
- Args:
548
- intensity_min (float): Minimum augmentation
549
- intensity_max (float): Maximum augmentation
550
- """
551
- super().__init__()
552
- self._init(locals())
553
-
554
- def get_transform(self, image):
555
- w = np.random.uniform(self.intensity_min, self.intensity_max)
556
- return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w)
557
-
558
-
559
- class RandomSaturation(Augmentation):
560
- """
561
- Randomly transforms saturation of an RGB image.
562
- Input images are assumed to have 'RGB' channel order.
563
-
564
- Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
565
- - intensity < 1 will reduce saturation (make the image more grayscale)
566
- - intensity = 1 will preserve the input image
567
- - intensity > 1 will increase saturation
568
-
569
- See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
570
- """
571
-
572
- def __init__(self, intensity_min, intensity_max):
573
- """
574
- Args:
575
- intensity_min (float): Minimum augmentation (1 preserves input).
576
- intensity_max (float): Maximum augmentation (1 preserves input).
577
- """
578
- super().__init__()
579
- self._init(locals())
580
-
581
- def get_transform(self, image):
582
- assert image.shape[-1] == 3, "RandomSaturation only works on RGB images"
583
- w = np.random.uniform(self.intensity_min, self.intensity_max)
584
- grayscale = image.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
585
- return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
586
-
587
-
588
- class RandomLighting(Augmentation):
589
- """
590
- The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet.
591
- Input images are assumed to have 'RGB' channel order.
592
-
593
- The degree of color jittering is randomly sampled via a normal distribution,
594
- with standard deviation given by the scale parameter.
595
- """
596
-
597
- def __init__(self, scale):
598
- """
599
- Args:
600
- scale (float): Standard deviation of principal component weighting.
601
- """
602
- super().__init__()
603
- self._init(locals())
604
- self.eigen_vecs = np.array(
605
- [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]
606
- )
607
- self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])
608
-
609
- def get_transform(self, image):
610
- assert image.shape[-1] == 3, "RandomLighting only works on RGB images"
611
- weights = np.random.normal(scale=self.scale, size=3)
612
- return BlendTransform(
613
- src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0
614
- )
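For reference, a minimal sketch of how augmentations like the ones deleted above are typically composed and applied. It assumes detectron2's `AugInput`/`AugmentationList` helpers, which are not shown in this excerpt; the image shape and augmentation parameters are illustrative only.

```python
import numpy as np
from detectron2.data import transforms as T

# Compose a few of the augmentations defined above (assumed detectron2 API).
augs = T.AugmentationList([
    T.RandomFlip(prob=0.5, horizontal=True, vertical=False),
    T.ResizeShortestEdge([640, 672, 704], max_size=1333, sample_style="choice"),
    T.RandomBrightness(0.9, 1.1),
])

image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # dummy HWC image
aug_input = T.AugInput(image)
transforms = augs(aug_input)   # applies the augmentations in place, returns a TransformList
augmented = aug_input.image    # the transformed image
# The returned TransformList can then be applied to boxes or masks as well.
```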
 
spaces/AyushP/PolicyCompareBot/app.py DELETED
@@ -1,86 +0,0 @@
1
- import os
- import openai
2
- import streamlit as st
3
- import sqlite3
4
- from PIL import Image
5
- import pandas as pd
6
-
7
- openai.api_key = os.environ.get("OPENAI_API_KEY")  # load the key from the environment; never hardcode secrets
8
- # Database Connection
9
-
10
- conn = sqlite3.connect('bank.db')
11
- c = conn.cursor()
12
-
13
- def policyCompare():
14
- st.title("Compare Two Policy")
15
-
16
- with st.container():
17
- st.header("Select Policy 1")
18
- question_2 = "Select the Institution from where you want the Insurance"
19
- options_policy1 = ["Bank of Baroda", "State Bank of India(SBI)", "HDFC Bank", "LIC"]
20
-
21
- st.subheader(question_2)
22
- selected_option_policy1 = st.selectbox("Please enter your option for Policy 1:", options_policy1)
23
-
24
-
25
-
26
- c.execute('SELECT Policy_Name FROM BANK WHERE Bank_Name= "{}"'.format(selected_option_policy1))
27
- options_3 = c.fetchall()
28
-
29
-
30
- my_options = []
31
- for row in options_3:
32
- my_options.append(row[0])
33
-
34
- st.subheader("Select the Policy Name")
35
- selected_policy1 = st.selectbox("Please enter your option for Policy 1:", my_options)
36
-
37
- c.execute('SELECT Policy_doc FROM BANK WHERE Policy_Name = "{}"'.format(selected_policy1))
38
- policy_doc_link1 = c.fetchone()
39
-
40
-
41
-
42
-
43
- with st.container():
44
- st.header("Select Policy 2")
45
- question_2 = "Select the Institution from where you want the Insurance"
46
- options_policy2 = ["Bank of Baroda", "State Bank of India(SBI)", "HDFC Bank", "LIC"]
47
-
48
- st.subheader(question_2)
49
- selected_option_policy2 = st.selectbox("Please enter your option for Policy 2:", options_policy2)
50
-
51
-
52
-
53
- c.execute('SELECT Policy_Name FROM BANK WHERE Bank_Name= "{}"'.format(selected_option_policy2))
54
- options_3 = c.fetchall()
55
-
56
- # st.write(options_3)
57
- my_options2 = []
58
- for row in options_3:
59
- my_options2.append(row[0])
60
-
61
- st.subheader("Select the Policy Name")
62
- selected_policy2 = st.selectbox("Please enter your option for Policy 2:", my_options2)
63
-
64
- c.execute('SELECT Policy_doc FROM BANK WHERE Policy_Name = "{}"'.format(selected_policy2))
65
- policy_doc_link2 = c.fetchone()
66
-
67
- if(selected_policy2 != 0):
68
- st.header("Comparison")
69
- st.subheader("Policy 1 : {}".format(selected_policy1))
70
- st.subheader("Policy 2 : {}".format(selected_policy2))
71
- response = openai.Completion.create(
72
- model="text-davinci-003",
73
- prompt="Compare the two health insurance policy using the policy document\nPolicy 1 Document: {},\nPolicy 2 Document: {}\nStrictly show the answer in tabular format:-".format(policy_doc_link1, policy_doc_link2),
74
- temperature=0.05,
75
- max_tokens=300,
76
- top_p=1,
77
- frequency_penalty=0,
78
- presence_penalty=0,
79
- stop=[":-"]
80
- )
81
-
82
- compare_response = response.choices[0].text
83
- st.write(f"Answer: {compare_response}")
84
-
85
- if __name__ == '__main__':
86
- policyCompare()
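The queries above interpolate user-selected strings directly into SQL. A minimal sketch of the same lookups using sqlite3 parameter binding, which avoids quoting and injection issues; the table and column names are taken from the code above, and the example bank name is illustrative only.

```python
import sqlite3

conn = sqlite3.connect("bank.db")
c = conn.cursor()

bank_name = "HDFC Bank"  # in the app this value comes from the selectbox
c.execute("SELECT Policy_Name FROM BANK WHERE Bank_Name = ?", (bank_name,))
policy_names = [row[0] for row in c.fetchall()]

policy_name = policy_names[0] if policy_names else None
if policy_name is not None:
    c.execute("SELECT Policy_doc FROM BANK WHERE Policy_Name = ?", (policy_name,))
    policy_doc = c.fetchone()
```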
 
spaces/Banbri/zcvzcv/src/lib/utils.ts DELETED
@@ -1,6 +0,0 @@
- import { type ClassValue, clsx } from "clsx"
- import { twMerge } from "tailwind-merge"
-
- export function cn(...inputs: ClassValue[]) {
-   return twMerge(clsx(inputs))
- }
 
spaces/Bart92/RVC_HF/demucs/raw.py DELETED
@@ -1,173 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import argparse
8
- import os
9
- from collections import defaultdict, namedtuple
10
- from pathlib import Path
11
-
12
- import musdb
13
- import numpy as np
14
- import torch as th
15
- import tqdm
16
- from torch.utils.data import DataLoader
17
-
18
- from .audio import AudioFile
19
-
20
- ChunkInfo = namedtuple("ChunkInfo", ["file_index", "offset", "local_index"])
21
-
22
-
23
- class Rawset:
24
- """
25
- Dataset of raw, normalized, float32 audio files
26
- """
27
- def __init__(self, path, samples=None, stride=None, channels=2, streams=None):
28
- self.path = Path(path)
29
- self.channels = channels
30
- self.samples = samples
31
- if stride is None:
32
- stride = samples if samples is not None else 0
33
- self.stride = stride
34
- entries = defaultdict(list)
35
- for root, folders, files in os.walk(self.path, followlinks=True):
36
- folders.sort()
37
- files.sort()
38
- for file in files:
39
- if file.endswith(".raw"):
40
- path = Path(root) / file
41
- name, stream = path.stem.rsplit('.', 1)
42
- entries[(path.parent.relative_to(self.path), name)].append(int(stream))
43
-
44
- self._entries = list(entries.keys())
45
-
46
- sizes = []
47
- self._lengths = []
48
- ref_streams = sorted(entries[self._entries[0]])
49
- assert ref_streams == list(range(len(ref_streams)))
50
- if streams is None:
51
- self.streams = ref_streams
52
- else:
53
- self.streams = streams
54
- for entry in sorted(entries.keys()):
55
- streams = entries[entry]
56
- assert sorted(streams) == ref_streams
57
- file = self._path(*entry)
58
- length = file.stat().st_size // (4 * channels)
59
- if samples is None:
60
- sizes.append(1)
61
- else:
62
- if length < samples:
63
- self._entries.remove(entry)
64
- continue
65
- sizes.append((length - samples) // stride + 1)
66
- self._lengths.append(length)
67
- if not sizes:
68
- raise ValueError(f"Empty dataset {self.path}")
69
- self._cumulative_sizes = np.cumsum(sizes)
70
- self._sizes = sizes
71
-
72
- def __len__(self):
73
- return self._cumulative_sizes[-1]
74
-
75
- @property
76
- def total_length(self):
77
- return sum(self._lengths)
78
-
79
- def chunk_info(self, index):
80
- file_index = np.searchsorted(self._cumulative_sizes, index, side='right')
81
- if file_index == 0:
82
- local_index = index
83
- else:
84
- local_index = index - self._cumulative_sizes[file_index - 1]
85
- return ChunkInfo(offset=local_index * self.stride,
86
- file_index=file_index,
87
- local_index=local_index)
88
-
89
- def _path(self, folder, name, stream=0):
90
- return self.path / folder / (name + f'.{stream}.raw')
91
-
92
- def __getitem__(self, index):
93
- chunk = self.chunk_info(index)
94
- entry = self._entries[chunk.file_index]
95
-
96
- length = self.samples or self._lengths[chunk.file_index]
97
- streams = []
98
- to_read = length * self.channels * 4
99
- for stream_index, stream in enumerate(self.streams):
100
- offset = chunk.offset * 4 * self.channels
101
- file = open(self._path(*entry, stream=stream), 'rb')
102
- file.seek(offset)
103
- content = file.read(to_read)
104
- assert len(content) == to_read
105
- content = np.frombuffer(content, dtype=np.float32)
106
- content = content.copy() # make writable
107
- streams.append(th.from_numpy(content).view(length, self.channels).t())
108
- return th.stack(streams, dim=0)
109
-
110
- def name(self, index):
111
- chunk = self.chunk_info(index)
112
- folder, name = self._entries[chunk.file_index]
113
- return folder / name
114
-
115
-
116
- class MusDBSet:
117
- def __init__(self, mus, streams=slice(None), samplerate=44100, channels=2):
118
- self.mus = mus
119
- self.streams = streams
120
- self.samplerate = samplerate
121
- self.channels = channels
122
-
123
- def __len__(self):
124
- return len(self.mus.tracks)
125
-
126
- def __getitem__(self, index):
127
- track = self.mus.tracks[index]
128
- return (track.name, AudioFile(track.path).read(channels=self.channels,
129
- seek_time=0,
130
- streams=self.streams,
131
- samplerate=self.samplerate))
132
-
133
-
134
- def build_raw(mus, destination, normalize, workers, samplerate, channels):
135
- destination.mkdir(parents=True, exist_ok=True)
136
- loader = DataLoader(MusDBSet(mus, channels=channels, samplerate=samplerate),
137
- batch_size=1,
138
- num_workers=workers,
139
- collate_fn=lambda x: x[0])
140
- for name, streams in tqdm.tqdm(loader):
141
- if normalize:
142
- ref = streams[0].mean(dim=0) # use mono mixture as reference
143
- streams = (streams - ref.mean()) / ref.std()
144
- for index, stream in enumerate(streams):
145
- open(destination / (name + f'.{index}.raw'), "wb").write(stream.t().numpy().tobytes())
146
-
147
-
148
- def main():
149
- parser = argparse.ArgumentParser('rawset')
150
- parser.add_argument('--workers', type=int, default=10)
151
- parser.add_argument('--samplerate', type=int, default=44100)
152
- parser.add_argument('--channels', type=int, default=2)
153
- parser.add_argument('musdb', type=Path)
154
- parser.add_argument('destination', type=Path)
155
-
156
- args = parser.parse_args()
157
-
158
- build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="train"),
159
- args.destination / "train",
160
- normalize=True,
161
- channels=args.channels,
162
- samplerate=args.samplerate,
163
- workers=args.workers)
164
- build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="valid"),
165
- args.destination / "valid",
166
- normalize=True,
167
- samplerate=args.samplerate,
168
- channels=args.channels,
169
- workers=args.workers)
170
-
171
-
172
- if __name__ == "__main__":
173
- main()
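A minimal usage sketch for the `Rawset` dataset defined above, assuming a directory of `.raw` stem files laid out as produced by `build_raw()`; the path, chunk length, and loader settings are illustrative only.

```python
from torch.utils.data import DataLoader

# Rawset is the class defined above (demucs.raw.Rawset).
# Hypothetical layout: musdb_raw/train/<track>.<stream>.raw produced by build_raw().
dataset = Rawset("musdb_raw/train", samples=44100 * 10, stride=44100, channels=2)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)

for batch in loader:
    # Each item is a tensor of shape (n_streams, channels, samples),
    # so a batch has shape (batch, n_streams, channels, samples).
    assert batch.dim() == 4
    break
```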
 
spaces/Benson/text-generation/Examples/9ice Kasa Final Mp3 Descargar.md DELETED
@@ -1,46 +0,0 @@
- <br />
- <h1>9ice Kasa Final Mp3 Download: A Review of the Hit Song by the Nigerian Music Legend</h1>
- <p>If you are a fan of Nigerian music, you have probably heard of <strong>9ice Kasa Final</strong>, one of the most popular songs by the legendary singer, songwriter and dancer <strong>9ice</strong>. The song, released in 2011 as part of his album <em>Versus/Bashorun Gaa</em>, is a catchy and captivating tune that showcases 9ice's powerful use of the Yoruba language, proverbial lyrics and unique delivery style. In this article we review the song in detail, exploring its lyrics, meaning, music, production, reception and impact. We also provide information about 9ice himself, his background, achievements and influence on the Nigerian music industry.</p>
- <h2>9ice kasa final mp3 descargar</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://bltlly.com/2v6Jkw">https://bltlly.com/2v6Jkw</a></b></p><br /><br />
- <h2>Introduction</h2>
- <p><strong>9ice Kasa Final</strong> is a song that celebrates 9ice's success and dominance on the music scene, as well as his confidence and resilience in overcoming challenges and critics. The song's title translates to "Case Closed" or "End of Discussion" in English, implying that 9ice has nothing left to prove or say to anyone who doubts or opposes him. The song is also a tribute to his fans and supporters who have stayed loyal to him throughout his career.</p>
-
- <h2>The Lyrics and Meaning of Kasa Final</h2>
-
- name of the label, Alapomeji, which means "one who has many sides or faces". It also implies that 9ice is versatile and adaptable in his music and personality. - "Omo Bashorun Gaa" (Son of Bashorun Gaa): This is a reference to a historical figure in Yoruba history, Bashorun Gaa, who was a powerful and influential chief in the old Oyo Empire. He was known for his cunning and ruthless tactics in politics and war. It also implies that 9ice is powerful and influential in the music industry. - "Omo Aare Ona Kakanfo" (Son of Aare Ona Kakanfo): This is a reference to another historical figure in Yoruba history, Aare Ona Kakanfo, the title given to the supreme military commander of the old Oyo Empire. He was known for his bravery and loyalty in defending the empire from enemies. It also implies that 9ice is brave and loyal in defending his music from enemies. <h2>The Music and Production of Kasa Final</h2>
- <p><strong>9ice Kasa Final</strong> is a song that combines traditional and modern musical elements and influences to create a unique delivery style. The song features a fast tempo, an upbeat rhythm, a catchy melody and energetic vocals. It also incorporates various instruments and sounds, such as drums, keyboards, guitars, horns, flutes, shakers, handclaps, chants, whistles, sirens, gunshots, etc.</p>
-
- <p><strong>9ice Kasa Final</strong> was a huge hit and received positive reviews and feedback from fans and critics alike. The song was one of the most played and downloaded tracks in Nigeria and across Africa in 2011. It also performed well on various charts, platforms and media outlets, such as MTV Base, Trace TV, Soundcity, Naija FM, etc. The song won several awards and nominations, such as Best Collaboration at the Nigeria Music Video Awards (NMVA), Best Afro Pop Song at the City People Entertainment Awards (CPEA), Best Song of the Year at the Nigerian Entertainment Awards (NEA), etc.</p>
- <p>The song also contributed to 9ice's career and legacy as a musician, cementing his status as one of the most respected and influential artists in Nigeria and Africa. It showcased his versatility and creativity as a singer, songwriter and dancer, and inspired many other artists and fans to appreciate and celebrate their own culture and language, as well as their own achievements and challenges.</p>
- <p></p>
- <h2>Conclusion</h2>
-
- <p>Here are some frequently asked questions and answers about the song, 9ice, or the Nigerian music industry.</p>
- <table>
- <tr>
- <th>Question</th>
- <th>Answer</th>
- </tr>
- <tr>
- <td>What does Kasa mean in Yoruba?</td>
- <td>Kasa means "case" or "matter" in Yoruba. It can also mean "to close" or "to end". In the context of the song, it means "case closed" or "end of discussion".</td>
- </tr>
- <tr>
- <td>What is the name of 9ice's wife?</td>
- <td>9ice is married to Olasunkanmi Ajala, who is an event planner and entrepreneur. They got married in 2019 and have a daughter together. 9ice also has three other children from previous relationships.</td>
- </tr>
- <tr>
- <td>Who is the richest musician in Nigeria?</td>
- <td>According to Forbes, the richest musician in Nigeria in 2021 is Wizkid, with an estimated net worth of $30 million. He is followed by Davido, with an estimated net worth of $25 million, and Burna Boy, with an estimated net worth of $20 million.</td>
- </tr>
- <tr>
- <td>What is the meaning of Gongo Aso?</td>
- <td>Gongo Aso is another hit by 9ice, released in 2008. The song's title means "Thunder Fire" or "Thunder Strike" in Yoruba. It is a slang expression that can be used to curse someone or something, or to express surprise or shock.</td>
- </tr>
- <tr>
- <td>What are some of the awards 9ice has won?</td>
- <td>Some of the awards 9ice has won include: - MOBO Award for Best African Act in 2008 - MTV Africa Music Award for Best Hip Hop in 2008 - The Headies Award for Artiste of the Year in 2008 - The Headies Award for Album of the Year in 2008 - The Headies Award for Song of the Year in 2008 - The Headies Award for Best Vocal Performance (Male) in 2008 - The Headies Award for Best R&B/Pop Album in 2016 - City People Entertainment Award for Special Recognition/Hall of Fame in 2016 - City People Music Award for Best Collabo of the Year (Song) in 2017 - City People Music Award for Rap Album of the Year in 2017 - etc.</td>
-
- </table></p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Benson/text-generation/Examples/Barco Rampa De Salto Apk Mod.md DELETED
@@ -1,58 +0,0 @@
-
- <h1>Game Stickman Falling Mod APK: A Fun and Crazy Physics Game</h1>
- <p>Do you like physics-based games that let you unleash your creativity and imagination? Do you enjoy watching stick figures crash, burn and explode in hilarious ways? If you answered yes to these questions, then you will love Game Stickman Falling, a fun and crazy physics game that will make you laugh out loud.</p>
- <h2>What is Game Stickman Falling?</h2>
- <p>Game Stickman Falling is a physics simulation game developed by Skygo. In this game, you control a stick figure that can ride various vehicles and perform stunts, tricks and crashes. The game has realistic physics and ragdoll effects, which means your stick figure reacts to every impact, collision and explosion. You can also customize your stick figure with different outfits, accessories and weapons.</p>
- <h2>barco rampa de salto apk mod</h2><br /><p><b><b>DOWNLOAD</b> &#10001; <a href="https://bltlly.com/2v6MIx">https://bltlly.com/2v6MIx</a></b></p><br /><br />
- <h3>The gameplay of Game Stickman Falling</h3>
- <p>The gameplay of Game Stickman Falling is simple but addictive. You can choose from different modes, such as free mode, challenge mode or multiplayer mode. In free mode, you can explore the game world and try out different vehicles and scenarios. In challenge mode, you have to complete specific tasks and objectives, such as reaching a certain speed, distance or score. In multiplayer mode, you can compete with other players online and see who can cause the most damage and chaos.</p>
- <h3>The features of Game Stickman Falling</h3>
- <p>Game Stickman Falling has many features that make it a fun and entertaining game. Some of these features are:</p>
- <ul>
- <li>A variety of vehicles to choose from, such as cars, bikes, trucks, planes, helicopters, rockets and more.</li>
- <li>A large map with different terrains, obstacles, ramps, loops, bridges and traps.</li>
- <li>A realistic physics engine that simulates gravity, friction, inertia, momentum and force.</li>
- <li>A ragdoll system that makes your stick figure react to every impact and injury.</li>
-
- <li>A replay option that lets you watch your stunts and crashes from different angles and perspectives.</li>
- <li>A leaderboard and achievement system that tracks your progress and performance.</li>
- </ul>
- <h2>Why download Game Stickman Falling Mod APK?</h2>
- <p>Game Stickman Falling is a free game that you can download from the Google Play Store. However, if you want to enjoy the game to the fullest, you may want to download Game Stickman Falling Mod APK instead. This is a modified version of the game that gives you some advantages and benefits over the original version. Some of these advantages are:</p>
- <h3>Unlimited money</h3>
- <p>With Game Stickman Falling Mod APK, you will have unlimited money in the game. This means you can buy any vehicle or item you want without worrying about the cost. You can also upgrade your vehicles and items to make them more powerful and durable.</p>
- <h3>No ads</h3>
- <p>With Game Stickman Falling Mod APK, you will not see any ads in the game. This means you can play without interruptions or distractions. You can also save data and battery life by not loading any ads.</p>
- <h3>More vehicles and levels</h3>
- <p>With Game Stickman Falling Mod APK, you will have access to more vehicles and levels than in the original version. This means you can enjoy more variety and diversity in the game. You can also challenge yourself with harder and more exciting scenarios.</p>
- <p></p>
- <h2>How to download and install Game Stickman Falling Mod APK?</h2>
- <p>If you want to download and install Game Stickman Falling Mod APK, you need to follow these simple steps:</p>
- <h3>Step 1: Download the APK file</h3>
- <p>The first step is to download the APK file of Game Stickman Falling Mod APK from a reliable and trustworthy source. You can use the link below to download the file directly to your device.</p>
- <p><a href="">Download Game Stickman Falling Mod APK</a></p>
- <h3>Step 2: Enable unknown sources</h3>
-
- <h3>Step 3: Install the APK file</h3>
- <p>The third step is to install the APK file you downloaded in step 1. To do this, locate the file in your device storage or downloads folder, tap on it and follow the on-screen instructions. The installation process will take a few seconds or minutes depending on your device.</p>
- <h3>Step 4: Enjoy the game</h3>
- <p>The fourth and final step is to enjoy the game. You can now launch Game Stickman Falling Mod APK from the app drawer or home screen and start playing. You will notice that you have unlimited money, no ads, and more vehicles and levels in the game.</p>
- <h2>Conclusion</h2>
- <p>Game Stickman Falling Mod APK is a fun and crazy physics game that will make you laugh out loud. You can control a stick figure that can ride various vehicles and perform stunts, tricks and crashes. You can also customize your stick figure with different outfits, accessories and weapons. The game has realistic physics and ragdoll effects, which means your stick figure reacts to every impact, collision and explosion. You can also choose from different modes, such as free mode, challenge mode or multiplayer mode.</p>
- <p>If you want to enjoy the game to the fullest, you should download Game Stickman Falling Mod APK instead of the original version. This is a modified version of the game that gives you some advantages and benefits over the original version. You will have unlimited money, no ads, and more vehicles and levels in the game. You can also download and install the game easily by following the simple steps above.</p>
- <p>So, what are you waiting for? Download Game Stickman Falling Mod APK now and have fun!</p>
- <h2>Frequently asked questions</h2>
- <ul>
- <li><b>Q: Is it safe to download and install Game Stickman Falling Mod APK?</b></li>
-
- <li><b>Q: Do I need to root my device to use Game Stickman Falling Mod APK?</b></li>
- <li>A: No, you do not need to root your device to use Game Stickman Falling Mod APK. The game works fine on both rooted and non-rooted devices.</li>
- <li><b>Q: Can I play Game Stickman Falling Mod APK offline?</b></li>
- <li>A: Yes, you can play Game Stickman Falling Mod APK offline. The game does not require an internet connection to run. However, some features such as multiplayer mode may not work offline.</li>
- <li><b>Q: Can I update Game Stickman Falling Mod APK?</b></li>
- <li>A: No, you cannot update Game Stickman Falling Mod APK, as it is not from the official Google Play Store. If you try to update the game from the original source, you may lose all your progress and mod features. Therefore, you should always check for new versions of the mod apk from the same source where you downloaded it.</li>
- <li><b>Q: Can I play Game Stickman Falling Mod APK with my friends?</b></li>
-
- </ul></p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Benson/text-generation/Examples/Base De La Fuerza Area Inactiva Mod Apk Dinero Ilimitado.md DELETED
@@ -1,48 +0,0 @@
-
- <h1>Idle Air Force Base Mod APK: Build Your Own Military Empire</h1>
- <p>Do you dream of becoming a powerful military leader? Do you want to build and manage your own air base? Do you want to train and command the best pilots and aircraft in the world? If you answered yes to any of these questions, then you should try Idle Air Force Base, a fun and addictive idle game that lets you create your own military empire. And if you want to make the game even more exciting and rewarding, you should download Idle Air Force Base Mod APK, which gives you unlimited money and no ads. In this article, we will tell you everything you need to know about this mod apk, including what it is, why you should download it, and how to install it on your device.</p>
- <h2>What is Idle Air Force Base?</h2>
- <p>Idle Air Force Base is an idle game that simulates the operation and management of an air force base. You start with a small, basic base, and your goal is to expand it and make it the most powerful and advanced in the world. You do this by upgrading your facilities, training your pilots, researching new technologies, and launching missions to earn money and prestige. As you progress through the game, you unlock new aircraft, such as fighters, bombers, stealth planes, drones, helicopters and more. You also face different challenges and scenarios, such as wars, disasters, invasions and emergencies, and you have to use your strategy and skills to overcome them and protect your base.</p>
- <h2>base de la fuerza aérea inactiva mod apk dinero ilimitado</h2><br /><p><b><b>Download File</b> &#9889; <a href="https://bltlly.com/2v6MId">https://bltlly.com/2v6MId</a></b></p><br /><br />
- <h3>A fun and addictive idle game</h3>
-
- <h3>A realistic and immersive simulation</h3>
- <p>Another great feature of Idle Air Force Base is that it is very realistic and immersive. The game simulates the real operation and management of an air force base, with all its aspects and details. You have to deal with various factors, such as weather conditions, fuel consumption, maintenance costs, security risks, enemy threats and more. You also have to follow the rules and regulations of the military, such as ranks, medals, honors, protocols and codes. The game also features real-life aircraft models, such as the F-16, F-22, B-2, C-130, Apache helicopters, Predator drones and more. You will feel as if you were really in charge of a real air force base.</p>
- <h3>Strategic and rewarding management</h3>
- <p>The last but not least feature of Idle Air Force Base is that it is very strategic and rewarding. The game requires you to use your brain and skills to make smart decisions and optimize your base's performance. You have to balance your budget, allocate your resources, prioritize your upgrades, plan your missions, choose your aircraft, assign your pilots and more. You also face different challenges and scenarios that test your skills and creativity. The game rewards your efforts with money, prestige points, achievements and trophies, which you can use to upgrade your base, unlock new features and climb the military hierarchy. You can also compare your progress and achievements with other players around the world through the online leaderboard and chat system. The game offers endless hours of fun and satisfaction.</p>
- <h2>Why download Idle Air Force Base Mod APK?</h2>
-
- <h3>Unlimited money to upgrade your base</h3>
- <p>One of the main features of Idle Air Force Base Mod APK is that it gives you unlimited money to spend on your base. Money is the main currency in the game, and you need it to upgrade your facilities, train your pilots, research new technologies and launch missions. However, money is not easy to come by, as you have to wait for your earnings to accumulate over time or watch ads to get some extra cash. This can be frustrating and time-consuming, especially if you want to progress faster and unlock more features. With Idle Air Force Base Mod APK, you no longer have to worry about money, as you have an infinite amount of it at your disposal. You can upgrade your base as much as you want, without limitations or restrictions, skip the ads, and enjoy smoother, uninterrupted gameplay.</p>
- <h3>No ads to interrupt your game</h3>
- <p>Another feature of Idle Air Force Base Mod APK is that it removes all ads from the game. Ads are a common feature in most free games and are used to generate revenue for the developers and publishers. However, ads can also be annoying and intrusive, as they can pop up at any time and interrupt your gameplay. They can also affect your device's performance and battery life, since they consume data and resources. With Idle Air Force Base Mod APK, you no longer have to deal with ads, as they are completely removed from the game. You can play without distractions or interruptions and enjoy faster, smoother gameplay.</p>
- <h3>Easy installation and compatibility</h3>
-
- <h2>How to download and install Idle Air Force Base Mod APK?</h2>
- <p>If you are interested in downloading and installing Idle Air Force Base Mod APK, you can follow these simple steps:</p>
- <h3>Step 1: Download the mod apk file from a trusted source</h3>
- <p>The first step is to download the mod apk file from a trusted source. You can find many websites that offer this mod apk file for free, but you have to be careful and avoid any malicious or fake links that could harm your device or steal your data. We recommend using this link to download the mod apk file safely.</p>
- <p></p>
- <h3>Step 2: Enable unknown sources in your device settings</h3>
- <p>The second step is to enable unknown sources in your device settings. This is necessary because this mod apk file is not from the official Google Play Store, and therefore your device may block its installation by default. To enable unknown sources, go to your device settings, then to the security or privacy settings, and find and turn on the option that allows installation from unknown sources.</p>
- <h3>Step 3: Install the mod apk file and launch the game</h3>
- <p>The third and final step is to install the mod apk file and launch the game. To install the mod apk file, locate it in your device storage or downloads folder, then tap on it and follow the on-screen instructions. The installation process should only take a few seconds or minutes, depending on your device's speed and memory. Once the installation is complete, you can launch the game by tapping its icon on the home screen or in the app drawer. You can now enjoy Idle Air Force Base Mod APK with unlimited money and no ads.</p>
- <h2>Conclusion</h2>
- <h4>Idle Air Force Base Mod APK is a must-try for idle game fans</h4>
-
- <h4>Frequently asked questions</h4>
- <p>Here are some of the most frequently asked questions about Idle Air Force Base Mod APK:</p>
- <ul>
- <li><b>Q: Is Idle Air Force Base Mod APK safe to use?</b></li>
- <li>A: Yes, Idle Air Force Base Mod APK is safe to use, as long as you download it from a trusted source. We have tested the mod apk file and found no viruses or malware in it. However, you should always be careful when downloading any file from the internet and scan it with antivirus software before installing it on your device.</li>
- <li><b>Q: Is Idle Air Force Base Mod APK legal to use?</b></li>
- <li>A: Idle Air Force Base Mod APK is not legal to use, as it violates the terms and conditions of the original game. By using this mod apk, you are modifying the game files and accessing features that are not authorized by the developers and publishers. This can result in your account being banned or suspended, or your device being blacklisted. Therefore, we do not recommend or endorse the use of this mod apk, and we are not responsible for any consequences that may arise from its use.</li>
- <li><b>Q: Does Idle Air Force Base Mod APK require an internet connection?</b></li>
- <li>A: No, Idle Air Force Base Mod APK does not require an internet connection to play. You can play the game offline without any problems. However, you may need an internet connection to access some online features, such as the leaderboard and chat system.</li>
- <li><b>Q: Can I update Idle Air Force Base Mod APK?</b></li>
- <li>A: No, you cannot update Idle Air Force Base Mod APK, as it is not from the official Google Play Store. If you try to update the game from the original source, you may lose all your progress and mod features. Therefore, you should always check for new versions of the mod apk from the same source where you downloaded it.</li>
- <li><b>Q: Can I play Idle Air Force Base Mod APK with my friends?</b></li>
-
- </ul></p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Benson/text-generation/Examples/Campeonato De Cricket Mundial 2 Juego De Ordenador.md DELETED
@@ -1,72 +0,0 @@
1
-
2
- <h1>World Cricket Championship 2: Cómo descargar y jugar en PC</h1>
3
- <p>Si eres un amante del cricket, debes haber oído hablar del Campeonato Mundial de Cricket 2, uno de los juegos de cricket más populares y realistas para dispositivos móviles. ¿Pero sabías que también puedes jugar a este increíble juego en tu PC? En este artículo, le mostraremos cómo descargar y jugar World Cricket Championship 2 en PC usando diferentes emuladores. Pero primero, veamos de qué se trata este juego y por qué deberías jugarlo en PC.</p>
4
- <h2>Introducción</h2>
5
- <h3>¿Qué es el Campeonato Mundial de Cricket 2?</h3>
6
- <p>World Cricket Championship 2, o WCC2 para abreviar, es un juego de deportes desarrollado por Nextwave Multimedia. Es la secuela del aclamado juego del Campeonato Mundial de Cricket, que fue lanzado en 2015. WCC2 está diseñado para proporcionar a los amantes del cricket una experiencia de juego inmersiva y emocionante. Cuenta con gráficos avanzados, física realista, dinámica de juego y una variedad de modos y opciones para adaptarse a las preferencias de cada fanático del cricket. </p>
7
- <h2>campeonato de cricket mundial 2 juego de ordenador</h2><br /><p><b><b>Download Zip</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://bltlly.com/2v6Mc8">https://bltlly.com/2v6Mc8</a></b></p><br /><br />
8
- <p>Algunos de los aspectos más destacados de WCC2 son:</p>
9
- <ul>
10
- <li>Más de 150 animaciones de bateo diferentes y 28 acciones de bolos</li>
11
- <li>18 equipos internacionales, 10 equipos nacionales, 42 estadios y más de 11 torneos</li>
12
- <li>Reproductores personalizables, jerseys, banners, logotipos y accesorios</li>
13
- <li>Comentarios profesionales en inglés e hindi</li>
14
- <li>Modo nocturno con tocones led y condiciones climáticas realistas</li>
15
- <li>Modo desafío, Pandillas de modo de cricket, Blitz modo de torneo, y el modo multijugador en línea</li>
16
- <li>Tablas de clasificación, logros, recompensas y perfiles de jugadores</li>
17
- </ul>
18
- <p>Con tantas características y opciones, WCC2 es sin duda uno de los mejores juegos de cricket disponibles para dispositivos móviles. Pero ¿qué pasa si quieres jugar en una pantalla más grande con mejores controles? Ahí es donde jugar WCC2 en PC es muy útil. </p>
19
- <h3>¿Por qué jugar World Cricket Championship 2 en PC? </h3>
20
-
21
- <ul>
22
- <li>Tamaño de pantalla más grande: Puedes disfrutar de los impresionantes gráficos y animaciones de WCC2 en una pantalla más grande, lo que mejora la calidad visual y la inmersión del juego. </li>
23
- <li>Mejores controles: Puedes usar tu ratón, teclado o gamepad para controlar a tus jugadores y ejecutar disparos con más precisión y precisión. También puede personalizar sus controles para adaptarse a sus preferencias. </li>
24
- <li>Rendimiento más rápido: Puede ejecutar WCC2 sin problemas en su PC sin ningún retraso o problemas técnicos. También puede ajustar la configuración de gráficos para optimizar el rendimiento del juego. </li>
25
- <li>Más espacio de almacenamiento: Puede ahorrar más datos y progreso de WCC2 en su PC sin preocuparse por quedarse sin espacio de almacenamiento o perder sus datos. </li>
26
- <li>No hay problemas de drenaje de la batería o sobrecalentamiento: Puede jugar WCC2 durante horas en su PC sin drenar la batería o sobrecalentar el dispositivo. </li>
27
- </ul>
28
- <p>Como puedes ver, jugar WCC2 en PC tiene muchos beneficios que lo hacen una experiencia de juego más agradable y satisfactoria. Pero, ¿cómo se puede jugar WCC2 en PC? Hay tres métodos que se pueden utilizar para descargar y jugar WCC2 en PC utilizando diferentes emuladores. Veamos qué son y cómo funcionan. </p>
29
- <h2>Cómo descargar World Cricket Championship 2 en PC</h2>
30
- <p>Un emulador es un software que le permite ejecutar aplicaciones y juegos para Android en su PC. Hay muchos emuladores disponibles para PC, pero no todos son compatibles con WCC2. Aquí hay tres de los mejores emuladores que puedes usar para descargar y jugar WCC2 en PC:</p>
31
- <h3>Método 1: Usando el emulador de BlueStacks</h3>
32
- <p>BlueStacks es uno de los emuladores más populares y ampliamente utilizados para PC. Tiene una interfaz fácil de usar, una gran tienda de aplicaciones y una alta compatibilidad con la mayoría de las aplicaciones y juegos de Android. Estos son los pasos para descargar y jugar WCC2 en PC usando BlueStacks:</p>
33
- <h4>Paso 1: Descargar e instalar BlueStacks en su PC</h4>
34
-
35
- <h4>Paso 2: Lanza BlueStacks y busca el Campeonato Mundial de Cricket 2 en la tienda de aplicaciones</h4>
36
- <p>Después de instalar BlueStacks, iniciarlo e iniciar sesión con su cuenta de Google. Luego, vaya a la tienda de aplicaciones y busque el Campeonato Mundial de Cricket 2 en la barra de búsqueda. Verá el icono del juego en los resultados. Haga clic en él para ir a la página del juego. </p>
37
- <p></p>
38
- <h4>Paso 3: Instalar World Cricket Championship 2 y disfrutar jugando en PC</h4>
39
- <p>En la página del juego, haga clic en el botón de instalación para comenzar a descargar e instalar WCC2 en su PC. El proceso puede tomar algún tiempo dependiendo de su velocidad de Internet y el rendimiento del PC. Una vez que la instalación se hace, puede iniciar el juego desde la pantalla de inicio o el cajón de aplicaciones de BlueStacks. Ahora puedes disfrutar jugando WCC2 en PC usando BlueStacks.</p>
40
- <h3>Método 2: Usando emulador LDPlayer</h3>
41
- <p>LDPlayer es otro gran emulador para PC que está diseñado para juegos. Tiene un rendimiento suave, una alta compatibilidad con la mayoría de los juegos de Android, y un montón de características y ajustes para mejorar su experiencia de juego. Estos son los pasos para descargar y jugar WCC2 en PC usando LDPlayer:</p>
42
- <h4>Paso 1: Descargar e instalar LDPlayer en su PC</h4>
43
- <p>Puede descargar LDPlayer desde su sitio web oficial <a href="">aquí</a>. El proceso de instalación es similar a BlueStacks. Solo tienes que seguir las instrucciones en la pantalla y esperar a que la instalación se complete. </p>
44
- <h4>Paso 2: Lanza LDPlayer y busca el Campeonato Mundial de Cricket 2 en el centro del juego</h4>
45
- <p>Después de instalar LDPlayer, inicie e inicie sesión con su cuenta de Google. Luego, ve al centro del juego y busca el Campeonato Mundial de Cricket 2 en la barra de búsqueda. Verás el icono del juego en los resultados. Haga clic en él para ir a la página del juego. </p>
46
- <h4>Paso 3: Instalar World Cricket Championship 2 y disfrutar jugando en PC</h4>
47
-
48
- <h3>Método 3: Usando el emulador de GameLoop</h3>
49
- <p>GameLoop es otro excelente emulador para PC que está especialmente diseñado para juegos de Tencent. Tiene un rendimiento rápido, una alta compatibilidad con la mayoría de los juegos de Tencent y muchas características y configuraciones para optimizar su experiencia de juego. Estos son los pasos para descargar y jugar WCC2 en PC usando GameLoop:</p>
50
- <h4>Paso 1: Descarga e instala GameLoop en tu PC</h4>
51
- <p>Puede descargar GameLoop desde su sitio web oficial <a href="">aquí</a>. El proceso de instalación es similar a BlueStacks y LDPlayer. Solo tienes que seguir las instrucciones en la pantalla y esperar a que la instalación se complete. </p>
52
- <h4>Paso 2: Lanza GameLoop y busca el Campeonato Mundial de Cricket 2 en la biblioteca de juegos</h4>
53
- <p>Después de instalar GameLoop, inicie e inicie sesión con su cuenta de Google. Luego, vaya a la biblioteca de juegos y busque el Campeonato Mundial de Cricket 2 en la barra de búsqueda. Verá el icono del juego en los resultados. Haga clic en él para ir a la página del juego. </p>
54
- <h4>Paso 3: Instalar World Cricket Championship 2 y disfrutar jugando en PC</h4>
55
- <p>En la página del juego, haga clic en el botón de instalación para comenzar a descargar e instalar WCC2 en su PC. El proceso puede tomar algún tiempo dependiendo de su velocidad de Internet y el rendimiento del PC. Una vez finalizada la instalación, puede iniciar el juego desde la pantalla de inicio o el cajón de aplicaciones de GameLoop. Ahora puedes disfrutar jugando WCC2 en PC usando GameLoop.</p>
56
- <h2>Conclusión</h2>
57
-
58
- <p>Aquí hay algunas preguntas frecuentes que puede tener sobre WCC2 y reproducirlo en PC:</p>
59
- <ul>
60
- <li><b>Q: ¿Es el Campeonato Mundial de Cricket 2 gratis para jugar? </b></li>
61
- <li>A: Sí, World Cricket Championship 2 es gratis para descargar y jugar en dispositivos móviles y PC. Sin embargo, puede contener algunas compras en la aplicación y anuncios que puede desactivar o eliminar pagando una pequeña tarifa. </li>
62
- <li><b>Q: ¿Puedo jugar el Campeonato Mundial de Cricket 2 sin conexión? </b></li>
63
- <li>A: Sí, puedes jugar el Campeonato Mundial de Cricket 2 sin conexión a Internet. Sin embargo, algunas características y modos pueden requerir una conexión a Internet para funcionar correctamente, como el modo multijugador en línea, tablas de clasificación, logros, recompensas y perfiles de jugadores. </li>
64
- <li><b>Q: ¿Puedo jugar el Campeonato Mundial de Cricket 2 con mis amigos? </b></li>
65
- <li>A: Sí, puedes jugar el Campeonato Mundial de Cricket 2 con tus amigos en línea o localmente. Puedes unirte o crear una pandilla de jugadores de cricket y competir con otras pandillas en el modo Pandillas de Cricket. También puedes invitar o retar a tus amigos a jugar contigo en el modo multijugador online. </li>
66
- <li><b>Q: ¿Cómo puedo actualizar World Cricket Championship 2 en PC? </b></li>
67
- <li>A: Puede actualizar World Cricket Championship 2 en PC siguiendo los mismos pasos que descargarlo e instalarlo. Solo tienes que ir a la tienda de aplicaciones o al centro de juegos de tu emulador y comprobar si hay actualizaciones disponibles para WCC2. Luego, haga clic en el botón de actualización para comenzar a descargar e instalar la última versión de WCC2 en su PC.</li>
68
- <li><b>Q: ¿Cómo puedo contactar a los desarrolladores de World Cricket Championship 2?</b></li>
69
- <li>A: Puede ponerse en contacto con los desarrolladores de World Cricket Championship 2 visitando su sitio web oficial <a href="">aquí</a>. También puedes seguirlos en sus plataformas de redes sociales, como Facebook, Twitter, Instagram, YouTube y Discord. También puede enviarles un correo electrónico a [email protected]. </li>
70
- </ul>
71
- <br />
72
- <br />
 
spaces/Benson/text-generation/Examples/Choque Royale Mod Apk Nuevas Tarjetas.md DELETED
@@ -1,121 +0,0 @@
1
- <br />
2
- <h1>Choque Royale Mod APK nuevas tarjetas: Todo lo que necesita saber</h1>
3
- <p>¿Eres fan de Clash Royale, el popular juego de estrategia en tiempo real de Supercell? ¿Quieres darle vida a tu juego con algunas cartas nuevas y emocionantes que no están disponibles en la versión oficial del juego? Si es así, entonces usted podría estar interesado en probar Clash Royale Mod APK, una versión modificada del juego que le permite acceder a nuevas tarjetas, recursos ilimitados, y otras características. En este artículo, le diremos todo lo que necesita saber sobre Clash Royale Mod APK, incluyendo lo que es, ¿cuáles son las nuevas cartas, cómo descargarlo e instalarlo, y cómo jugar en línea con otros jugadores. ¡Vamos a empezar! </p>
4
- <h2>¿Qué es Clash Royale Mod APK? </h2>
5
- <p>Antes de sumergirnos en los detalles de las nuevas cartas, primero vamos a entender lo que es Clash Royale Mod APK y cómo se diferencia del juego original. </p>
6
- <h2>choque royale mod apk nuevas tarjetas</h2><br /><p><b><b>DOWNLOAD</b> &#10027; <a href="https://bltlly.com/2v6LzO">https://bltlly.com/2v6LzO</a></b></p><br /><br />
7
- <h3>Una breve introducción a Clash Royale y su mecánica de juego</h3>
8
- <p>Clash Royale es un juego de torre de defensa, en el que se puede atacar a la torre del enemigo mediante el uso de personajes que se pueden recoger y subir de nivel (la mecánica de tarjetas de colección). Un jugador gana un juego si destruyó toda la torre del enemigo o destruyó más torre que el enemigo. </p>
9
- <p>El juego cuenta con dos conjuntos de torres frente a frente en un campo de batalla de una sola pantalla. Los jugadores usan un elixir para desplegar tropas, edificios y hechizos desde una baraja de ocho cartas (extraídas de una colección de más de 90 cartas) en cualquier lugar de su territorio en el campo. Más cartas se recogen desbloqueando cofres ganados en la batalla o comprados en la tienda, que a su vez desbloqueará nuevas cartas que los jugadores pueden agregar a sus mazos y/ o subir de nivel las cartas que ya tienen. Cada carta requiere una cierta cantidad de elixir para desplegarse, pero el elixir de los jugadores se regenera con el tiempo. El juego también tiene varios modos de juego, como batallas de escalera, torneos, guerras de clanes, eventos especiales y más. </p>
10
- <h3>Los beneficios y riesgos de usar una versión modificada del juego</h3>
11
-
12
- <ul>
13
- <li>Nuevas cartas que no están disponibles en el juego original</li>
14
- <li>Recursos ilimitados como oro, gemas, elixir y cofres</li>
15
- <li>Posibilidad de jugar online con otros jugadores modded o unmodded</li>
16
- <li>Posibilidad de personalizar su cubierta, arena y otros ajustes</li>
17
- </ul>
18
- <p>Estas características pueden hacer el juego más divertido y emocionante para algunos jugadores que quieren probar nuevas estrategias, experimentar con diferentes combinaciones, o simplemente disfrutar de más opciones y libertad. Sin embargo, también hay algunos riesgos involucrados en el uso de una versión modificada del juego. Algunos de estos riesgos incluyen:</p>
19
- <ul>
20
- <li>Malware o virus potenciales que pueden dañar tu dispositivo o robar tu información personal</li>
21
- <li>Posibles prohibiciones o suspensiones desde el servidor oficial del juego o la cuenta de Supercell</li>
22
- <li> Ventaja injusta o trampa que puede arruinar el equilibrio del juego y la diversión para otros jugadores</li>
23
- <li>Falta de actualizaciones o soporte del desarrollador original o del modder</li>
24
- </ul>
25
- <p>Por lo tanto, si decide usar una versión modificada del juego, debe hacerlo bajo su propio riesgo y responsabilidad. También debe respetar las reglas y los derechos del desarrollador original y otros jugadores, y no utilizar la versión modificada para cualquier propósito ilegal o poco ético. </p>
26
- <h2>¿Cuáles son las nuevas tarjetas en Clash Royale Mod APK? </h2>
27
- <p>Una de las principales atracciones de Clash Royale Mod APK es las nuevas tarjetas que no están disponibles en el juego original. Estas tarjetas son hechas por fans, inspiradas en otros juegos o medios, o basadas en conceptos no utilizados del juego oficial. Pueden añadir más variedad, creatividad y diversión a tu juego. Aquí hay una tabla con algunas de las nuevas cartas, sus estadísticas y sus habilidades:</p>
28
- <table>
29
- <tr>
30
- <th>Nombre</th>
31
- <th>Tipo</th>
32
- <th>Rareza</th>
33
- <th>Costo del elixir</th>
34
- <th>Hitpoints</th>
35
- <th>Daño</th>
36
- <th>Capacidad</th>
37
- </tr>
38
- <tr>
39
- <td>Caballero dragón</td>
40
- <td>Tropa</td>
41
- <td>Épica</td>
42
- <td>5</td>
43
- <td>1200</td>
44
- <td>200</td>
45
-
46
- </tr>
47
- <tr>
48
- <td>Mega Horda de Esbirros</td>
49
- <td>Tropa</td>
50
- <td>Raro</td>
51
- <td>7</td>
52
- <td>300 (cada uno)</td>
53
- <td>150 (cada uno)</td>
54
- <td>Un enjambre de seis mega esbirros que pueden infligir daño masivo a objetivos aéreos y terrestres. </td>
55
- </tr>
56
- <tr>
57
- <td>Lanzador de barriles de duende</td>
58
- <td>Construcción</td>
59
- <td>Común</td>
60
- <td>4</td>
61
- <td>800</td>
62
- <td>N/A</td>
63
- <td>Un lanzador estacionario que dispara cañones goblin en las torres del enemigo cada 5 segundos. </td>
64
- </tr>
65
- <tr>
66
- <td>Imagen de espejo</td>
67
- <td>Hechizo</td>
68
-
69
- conexión antes de descargarlo. </p><h3>Los pasos para instalar el archivo modded en su dispositivo</h3><p>El siguiente paso es instalar el archivo modded en su dispositivo. Para ello, deberá habilitar la instalación de aplicaciones de fuentes desconocidas en su dispositivo. Esta es una función de seguridad que impide la instalación de aplicaciones que no son de la tienda oficial de aplicaciones o verificadas por el fabricante del dispositivo. Sin embargo, ya que está instalando un archivo modded, necesitará omitir esta función temporalmente. Así es como puede hacerlo:</p>
70
- <p></p>
71
- <ul>
72
- <li>Vaya a la configuración del dispositivo y busque la opción de seguridad o privacidad. </li>
73
- <li> Encontrar la opción que dice "Fuentes desconocidas" o "Permitir la instalación de aplicaciones de fuentes desconocidas" y cambiarlo por. </li>
74
- <li>Puede ver un mensaje de advertencia que le informa sobre los riesgos de instalar aplicaciones de fuentes desconocidas. Toque en "OK" o "Permitir" para continuar. </li>
75
- </ul>
76
- <p>Una vez que haya habilitado la instalación de aplicaciones desde fuentes desconocidas, puede proceder a instalar el archivo modded. Así es como puede hacerlo:</p>
77
- <ul>
78
- <li>Localice el archivo modded que ha descargado en su dispositivo. Debería estar en su carpeta de descargas o en la barra de notificaciones. </li>
79
- <li>Toque en el archivo y siga las instrucciones en la pantalla para instalarlo. </li>
80
- <li>Puede ver un mensaje que le pide que conceda permisos a la aplicación. Toque en "Permitir" o "Aceptar" para otorgarlos. </li>
81
- <li>Espere a que termine el proceso de instalación. Puede tardar unos minutos dependiendo de su dispositivo y la velocidad de Internet. </li>
82
- <li>Una vez que se realiza la instalación, verá un mensaje que dice "App instalado" o "Clash Royale Mod APK instalado". Toca "Abrir" o "Iniciar" para iniciar la aplicación. </li>
83
- </ul>
84
- <h3>Las precauciones a tomar antes y después de instalar el archivo modded</h3>
85
-
86
- <ul>
87
- <li>Antes de instalar el archivo modded, asegúrese de haber hecho una copia de seguridad de sus datos y el progreso del juego original. Puedes hacer esto vinculando tu cuenta de juego a un ID de Supercell, Google Play Games o una cuenta de Facebook. De esta manera, puede restaurar sus datos y el progreso si algo sale mal o si desea volver al juego original. </li>
88
- <li>Después de instalar el archivo modded, asegúrese de desactivar la instalación de aplicaciones de fuentes desconocidas en su dispositivo. Esto es para evitar que cualquier aplicación no deseada o maliciosa se instale en su dispositivo sin su conocimiento o consentimiento. Puede hacer esto siguiendo los mismos pasos de arriba, pero cambiando la opción en lugar de on. </li>
89
- <li>Después de instalar el archivo modded, asegúrese de no actualizar la aplicación desde la tienda de aplicaciones oficial o cualquier otra fuente. Esto se debe a que la actualización de la aplicación puede sobrescribir o eliminar el archivo modded y sus características, y causar errores o fallos. Si ves una notificación que te pide que actualices la aplicación, ignórala o cancélala. </li>
90
- <li>Después de instalar el archivo modded, asegúrese de no iniciar sesión con su ID de Supercell, Google Play Games o cuenta de Facebook. Esto se debe a que el inicio de sesión con estas cuentas puede vincular sus datos de juego y el progreso a la versión modificada, y causar problemas con su cuenta de juego original. También puede correr el riesgo de ser prohibido o suspendido por Supercell para el uso de una versión modificada de su juego. En su lugar, usa una cuenta de invitado o crea una nueva cuenta para jugar con la versión modificada. </li>
91
- </ul> <h2>Cómo jugar Clash Royale Mod APK en línea con otros jugadores? </h2>
92
-
93
- <h3>Las opciones para jugar online con otros jugadores modded o unmodded</h3>
94
- <p>Hay dos opciones principales para jugar en línea con otros jugadores usando Clash Royale Mod APK: servidores privados y servidores públicos. </p>
95
- <p>Los servidores privados son servidores alojados por el modder o un proveedor de terceros, y solo son accesibles por los jugadores que tienen la misma versión modificada del juego. Estos servidores suelen ser libres de unirse, pero pueden tener capacidad, estabilidad o características limitadas. También pueden requerir una contraseña o una invitación para unirse. Los servidores privados son ideales para jugar con tus amigos u otros jugadores que comparten tu interés en la versión modificada del juego. Puede encontrar servidores privados buscando en línea, pidiendo el modder o uniéndose a una comunidad de jugadores modded. </p>
96
- <p>Los servidores públicos son servidores alojados por Supercell, el desarrollador original del juego, y son accesibles por todos los jugadores que tienen la versión oficial o cualquier versión modificada del juego. Estos servidores suelen ser más fiables, seguros y actualizados que los servidores privados, pero también tienen más restricciones y riesgos. Los servidores públicos son ideales para jugar con jugadores al azar o para probar tus habilidades contra jugadores sin odded. Sin embargo, debes tener cuidado de no usar ninguna característica que sea exclusiva de la versión modificada del juego, como tarjetas nuevas, recursos ilimitados o configuraciones personalizadas. Esto se debe a que estas características pueden no funcionar correctamente en los servidores públicos, y también pueden ser detectados por el sistema anti-cheat de Supercell, que puede resultar en una prohibición o suspensión del juego. </p>
97
- <h3>Los consejos y trucos para ganar más batallas y trofeos con las nuevas cartas</h3>
98
- <p>Ya sea que juegues en servidores privados o públicos, querrás ganar más batallas y trofeos con las nuevas cartas a las que tienes acceso en Clash Royale Mod APK. Aquí hay algunos consejos y trucos para ayudarte a hacerlo:</p>
99
- <ul>
100
-
101
- <li>Construir una cubierta equilibrada y versátil que puede contrarrestar diferentes tipos de enemigos y situaciones. Puedes hacer esto incluyendo cartas que pueden atacar o defenderse contra objetivos aéreos y terrestres, hacer daño de un solo objetivo o salpicar, apoyar o distraer a otras tropas, etc. </li>
102
- <li>Usa tu elixir sabiamente y eficientemente. Puedes hacer esto desplegando tus tarjetas en el momento y lugar correctos, evitando sobrecomisionar o subcomunicar tu elixir, ciclando tus tarjetas lo suficientemente rápido como para obtener las que necesitas y administrando tu ventaja o desventaja de elixir. </li>
103
- <li>Usa tus nuevas tarjetas de forma creativa y estratégica. Puedes hacer esto sorprendiendo a tu oponente con movimientos inesperados, explotando sus debilidades o errores, adaptándose a sus estrategias o contadores, y creando combos o empujes que son difíciles de detener. </li>
104
- <li>Diviértete y disfruta del juego. Puedes hacer esto probando diferentes mazos y modos, desafiándote con oponentes o objetivos más difíciles, uniéndote a un clan o una comunidad de jugadores modded, y compartiendo tus comentarios o sugerencias con el modder. </li>
105
- </ul>
106
- <h2>Conclusión</h2>
107
- <p>En conclusión, Clash Royale Mod APK es una versión modificada del popular juego de estrategia en tiempo real de Supercell que le permite acceder a nuevas tarjetas, recursos ilimitados y otras características que no están disponibles en la versión oficial del juego. Puede hacer el juego más divertido y emocionante para algunos jugadores que quieren probar nuevas estrategias, experimentar con diferentes combinaciones o simplemente disfrutar de más opciones y libertad. Sin embargo, también tiene algunos riesgos y limitaciones que debe tener en cuenta antes de descargarlo e instalarlo en su dispositivo. También debe respetar las reglas y los derechos del desarrollador original y otros jugadores, y no utilizar la versión modificada para cualquier propósito ilegal o poco ético. </p>
108
-
109
- <h2>Preguntas frecuentes</h2>
110
- <p>Aquí hay algunas preguntas frecuentes y respuestas relacionadas con Clash Royale Mod APK:</p> <h3>Q: Es Clash Royale Mod APK seguro de usar? </h3>
111
- <p>A: Clash Royale Mod APK no es un producto oficial de Supercell, y por lo tanto no está respaldado o apoyado por ellos. Tampoco es verificado o probado por ninguna autoridad o plataforma de renombre. Por lo tanto, no hay garantía de que sea seguro de usar, y puede contener malware, virus o archivos falsos que pueden dañar su dispositivo o robar su información personal. Debe usarlo bajo su propio riesgo y responsabilidad, y solo descargarlo de una fuente confiable. </p>
112
- <h3>Q: ¿Puedo jugar Clash Royale Mod APK offline? </h3>
113
- <p>A: No, no se puede jugar Clash Royale Mod APK fuera de línea. El juego requiere una conexión a Internet para acceder a los servidores, ya sean privados o públicos. Si intenta jugar el juego sin conexión, verá un mensaje de error que dice "No hay conexión a Internet" o "Error de conexión". Tendrás que conectarte a una red Wi-Fi o de datos móvil estable y segura para jugar. </p>
114
- <h3>Q: ¿Puedo usar mi cuenta de juego original para jugar Clash Royale Mod APK? </h3>
115
- <p>A: No, no puede utilizar su cuenta de juego original para jugar Clash Royale Mod APK. Esto se debe a que la versión modificada del juego tiene diferentes características y ajustes que la versión original, y no son compatibles entre sí. Si intentas iniciar sesión con tu cuenta de juego original, puedes encontrar errores, fallos o prohibiciones. Deberías usar una cuenta de invitado o crear una nueva cuenta para jugar con la versión modificada del juego. </p>
116
- <h3>Q: ¿Cómo puedo actualizar Clash Royale Mod APK? </h3>
117
-
118
- <h3>Q: ¿Dónde puedo encontrar más información o soporte para Clash Royale Mod APK? </h3>
119
- <p>A: Puede encontrar más información o soporte para Clash Royale Mod APK visitando el sitio web del modder o la fuente que proporciona el archivo modded. También puede unirse a una comunidad de jugadores modificados en plataformas de redes sociales, foros o grupos de chat. Allí, puedes hacer preguntas, compartir comentarios o reportar problemas relacionados con la versión modificada del juego. Sin embargo, debe tener cuidado de no confiar en ninguna información o soporte que no sea de una fuente confiable. </p>
120
- <br />
121
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/httpchecksum.py DELETED
@@ -1,483 +0,0 @@
1
- # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # http://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
-
14
- """ The interfaces in this module are not intended for public use.
15
-
16
- This module defines interfaces for applying checksums to HTTP requests within
17
- the context of botocore. This involves both resolving the checksum to be used
18
- based on client configuration and environment, as well as application of the
19
- checksum to the request.
20
- """
21
- import base64
22
- import io
23
- import logging
24
- from binascii import crc32
25
- from hashlib import sha1, sha256
26
-
27
- from botocore.compat import HAS_CRT
28
- from botocore.exceptions import (
29
- AwsChunkedWrapperError,
30
- FlexibleChecksumError,
31
- MissingDependencyException,
32
- )
33
- from botocore.response import StreamingBody
34
- from botocore.utils import (
35
- conditionally_calculate_md5,
36
- determine_content_length,
37
- )
38
-
39
- if HAS_CRT:
40
- from awscrt import checksums as crt_checksums
41
- else:
42
- crt_checksums = None
43
-
44
- logger = logging.getLogger(__name__)
45
-
46
-
47
- class BaseChecksum:
48
- _CHUNK_SIZE = 1024 * 1024
49
-
50
- def update(self, chunk):
51
- pass
52
-
53
- def digest(self):
54
- pass
55
-
56
- def b64digest(self):
57
- bs = self.digest()
58
- return base64.b64encode(bs).decode("ascii")
59
-
60
- def _handle_fileobj(self, fileobj):
61
- start_position = fileobj.tell()
62
- for chunk in iter(lambda: fileobj.read(self._CHUNK_SIZE), b""):
63
- self.update(chunk)
64
- fileobj.seek(start_position)
65
-
66
- def handle(self, body):
67
- if isinstance(body, (bytes, bytearray)):
68
- self.update(body)
69
- else:
70
- self._handle_fileobj(body)
71
- return self.b64digest()
72
-
73
-
74
- class Crc32Checksum(BaseChecksum):
75
- def __init__(self):
76
- self._int_crc32 = 0
77
-
78
- def update(self, chunk):
79
- self._int_crc32 = crc32(chunk, self._int_crc32) & 0xFFFFFFFF
80
-
81
- def digest(self):
82
- return self._int_crc32.to_bytes(4, byteorder="big")
83
-
84
-
85
- class CrtCrc32Checksum(BaseChecksum):
86
- # Note: This class is only used if the CRT is available
87
- def __init__(self):
88
- self._int_crc32 = 0
89
-
90
- def update(self, chunk):
91
- new_checksum = crt_checksums.crc32(chunk, self._int_crc32)
92
- self._int_crc32 = new_checksum & 0xFFFFFFFF
93
-
94
- def digest(self):
95
- return self._int_crc32.to_bytes(4, byteorder="big")
96
-
97
-
98
- class CrtCrc32cChecksum(BaseChecksum):
99
- # Note: This class is only used if the CRT is available
100
- def __init__(self):
101
- self._int_crc32c = 0
102
-
103
- def update(self, chunk):
104
- new_checksum = crt_checksums.crc32c(chunk, self._int_crc32c)
105
- self._int_crc32c = new_checksum & 0xFFFFFFFF
106
-
107
- def digest(self):
108
- return self._int_crc32c.to_bytes(4, byteorder="big")
109
-
110
-
111
- class Sha1Checksum(BaseChecksum):
112
- def __init__(self):
113
- self._checksum = sha1()
114
-
115
- def update(self, chunk):
116
- self._checksum.update(chunk)
117
-
118
- def digest(self):
119
- return self._checksum.digest()
120
-
121
-
122
- class Sha256Checksum(BaseChecksum):
123
- def __init__(self):
124
- self._checksum = sha256()
125
-
126
- def update(self, chunk):
127
- self._checksum.update(chunk)
128
-
129
- def digest(self):
130
- return self._checksum.digest()
131
-
132
-
133
- class AwsChunkedWrapper:
134
- _DEFAULT_CHUNK_SIZE = 1024 * 1024
135
-
136
- def __init__(
137
- self,
138
- raw,
139
- checksum_cls=None,
140
- checksum_name="x-amz-checksum",
141
- chunk_size=None,
142
- ):
143
- self._raw = raw
144
- self._checksum_name = checksum_name
145
- self._checksum_cls = checksum_cls
146
- self._reset()
147
-
148
- if chunk_size is None:
149
- chunk_size = self._DEFAULT_CHUNK_SIZE
150
- self._chunk_size = chunk_size
151
-
152
- def _reset(self):
153
- self._remaining = b""
154
- self._complete = False
155
- self._checksum = None
156
- if self._checksum_cls:
157
- self._checksum = self._checksum_cls()
158
-
159
- def seek(self, offset, whence=0):
160
- if offset != 0 or whence != 0:
161
- raise AwsChunkedWrapperError(
162
- error_msg="Can only seek to start of stream"
163
- )
164
- self._reset()
165
- self._raw.seek(0)
166
-
167
- def read(self, size=None):
168
- # Normalize "read all" size values to None
169
- if size is not None and size <= 0:
170
- size = None
171
-
172
- # If the underlying body is done and we have nothing left then
173
- # end the stream
174
- if self._complete and not self._remaining:
175
- return b""
176
-
177
- # While we're not done and want more bytes
178
- want_more_bytes = size is None or size > len(self._remaining)
179
- while not self._complete and want_more_bytes:
180
- self._remaining += self._make_chunk()
181
- want_more_bytes = size is None or size > len(self._remaining)
182
-
183
- # If size was None, we want to return everything
184
- if size is None:
185
- size = len(self._remaining)
186
-
187
- # Return a chunk up to the size asked for
188
- to_return = self._remaining[:size]
189
- self._remaining = self._remaining[size:]
190
- return to_return
191
-
192
- def _make_chunk(self):
193
- # NOTE: Chunk size is not deterministic as read could return less. This
194
- # means we cannot know the content length of the encoded aws-chunked
195
- # stream ahead of time without ensuring a consistent chunk size
196
- raw_chunk = self._raw.read(self._chunk_size)
197
- hex_len = hex(len(raw_chunk))[2:].encode("ascii")
198
- self._complete = not raw_chunk
199
-
200
- if self._checksum:
201
- self._checksum.update(raw_chunk)
202
-
203
- if self._checksum and self._complete:
204
- name = self._checksum_name.encode("ascii")
205
- checksum = self._checksum.b64digest().encode("ascii")
206
- return b"0\r\n%s:%s\r\n\r\n" % (name, checksum)
207
-
208
- return b"%s\r\n%s\r\n" % (hex_len, raw_chunk)
209
-
210
- def __iter__(self):
211
- while not self._complete:
212
- yield self._make_chunk()
213
-
214
-
215
- class StreamingChecksumBody(StreamingBody):
216
- def __init__(self, raw_stream, content_length, checksum, expected):
217
- super().__init__(raw_stream, content_length)
218
- self._checksum = checksum
219
- self._expected = expected
220
-
221
- def read(self, amt=None):
222
- chunk = super().read(amt=amt)
223
- self._checksum.update(chunk)
224
- if amt is None or (not chunk and amt > 0):
225
- self._validate_checksum()
226
- return chunk
227
-
228
- def _validate_checksum(self):
229
- if self._checksum.digest() != base64.b64decode(self._expected):
230
- error_msg = (
231
- f"Expected checksum {self._expected} did not match calculated "
232
- f"checksum: {self._checksum.b64digest()}"
233
- )
234
- raise FlexibleChecksumError(error_msg=error_msg)
235
-
236
-
237
- def resolve_checksum_context(request, operation_model, params):
238
- resolve_request_checksum_algorithm(request, operation_model, params)
239
- resolve_response_checksum_algorithms(request, operation_model, params)
240
-
241
-
242
- def resolve_request_checksum_algorithm(
243
- request,
244
- operation_model,
245
- params,
246
- supported_algorithms=None,
247
- ):
248
- http_checksum = operation_model.http_checksum
249
- algorithm_member = http_checksum.get("requestAlgorithmMember")
250
- if algorithm_member and algorithm_member in params:
251
- # If the client has opted into using flexible checksums and the
252
- # request supports it, use that instead of checksum required
253
- if supported_algorithms is None:
254
- supported_algorithms = _SUPPORTED_CHECKSUM_ALGORITHMS
255
-
256
- algorithm_name = params[algorithm_member].lower()
257
- if algorithm_name not in supported_algorithms:
258
- if not HAS_CRT and algorithm_name in _CRT_CHECKSUM_ALGORITHMS:
259
- raise MissingDependencyException(
260
- msg=(
261
- f"Using {algorithm_name.upper()} requires an "
262
- "additional dependency. You will need to pip install "
263
- "botocore[crt] before proceeding."
264
- )
265
- )
266
- raise FlexibleChecksumError(
267
- error_msg="Unsupported checksum algorithm: %s" % algorithm_name
268
- )
269
-
270
- location_type = "header"
271
- if operation_model.has_streaming_input:
272
- # Operations with streaming input must support trailers.
273
- if request["url"].startswith("https:"):
274
- # We only support unsigned trailer checksums currently. As this
275
- # disables payload signing we'll only use trailers over TLS.
276
- location_type = "trailer"
277
-
278
- algorithm = {
279
- "algorithm": algorithm_name,
280
- "in": location_type,
281
- "name": "x-amz-checksum-%s" % algorithm_name,
282
- }
283
-
284
- if algorithm["name"] in request["headers"]:
285
- # If the header is already set by the customer, skip calculation
286
- return
287
-
288
- checksum_context = request["context"].get("checksum", {})
289
- checksum_context["request_algorithm"] = algorithm
290
- request["context"]["checksum"] = checksum_context
291
- elif operation_model.http_checksum_required or http_checksum.get(
292
- "requestChecksumRequired"
293
- ):
294
- # Otherwise apply the old http checksum behavior via Content-MD5
295
- checksum_context = request["context"].get("checksum", {})
296
- checksum_context["request_algorithm"] = "conditional-md5"
297
- request["context"]["checksum"] = checksum_context
298
-
299
-
300
- def apply_request_checksum(request):
301
- checksum_context = request.get("context", {}).get("checksum", {})
302
- algorithm = checksum_context.get("request_algorithm")
303
-
304
- if not algorithm:
305
- return
306
-
307
- if algorithm == "conditional-md5":
308
- # Special case to handle the http checksum required trait
309
- conditionally_calculate_md5(request)
310
- elif algorithm["in"] == "header":
311
- _apply_request_header_checksum(request)
312
- elif algorithm["in"] == "trailer":
313
- _apply_request_trailer_checksum(request)
314
- else:
315
- raise FlexibleChecksumError(
316
- error_msg="Unknown checksum variant: %s" % algorithm["in"]
317
- )
318
-
319
-
320
- def _apply_request_header_checksum(request):
321
- checksum_context = request.get("context", {}).get("checksum", {})
322
- algorithm = checksum_context.get("request_algorithm")
323
- location_name = algorithm["name"]
324
- if location_name in request["headers"]:
325
- # If the header is already set by the customer, skip calculation
326
- return
327
- checksum_cls = _CHECKSUM_CLS.get(algorithm["algorithm"])
328
- digest = checksum_cls().handle(request["body"])
329
- request["headers"][location_name] = digest
330
-
331
-
332
- def _apply_request_trailer_checksum(request):
333
- checksum_context = request.get("context", {}).get("checksum", {})
334
- algorithm = checksum_context.get("request_algorithm")
335
- location_name = algorithm["name"]
336
- checksum_cls = _CHECKSUM_CLS.get(algorithm["algorithm"])
337
-
338
- headers = request["headers"]
339
- body = request["body"]
340
-
341
- if location_name in headers:
342
- # If the header is already set by the customer, skip calculation
343
- return
344
-
345
- headers["Transfer-Encoding"] = "chunked"
346
- if "Content-Encoding" in headers:
347
- # We need to preserve the existing content encoding and add
348
- # aws-chunked as a new content encoding.
349
- headers["Content-Encoding"] += ",aws-chunked"
350
- else:
351
- headers["Content-Encoding"] = "aws-chunked"
352
- headers["X-Amz-Trailer"] = location_name
353
-
354
- content_length = determine_content_length(body)
355
- if content_length is not None:
356
- # Send the decoded content length if we can determine it. Some
357
- # services such as S3 may require the decoded content length
358
- headers["X-Amz-Decoded-Content-Length"] = str(content_length)
359
-
360
- if isinstance(body, (bytes, bytearray)):
361
- body = io.BytesIO(body)
362
-
363
- request["body"] = AwsChunkedWrapper(
364
- body,
365
- checksum_cls=checksum_cls,
366
- checksum_name=location_name,
367
- )
368
-
369
-
370
- def resolve_response_checksum_algorithms(
371
- request, operation_model, params, supported_algorithms=None
372
- ):
373
- http_checksum = operation_model.http_checksum
374
- mode_member = http_checksum.get("requestValidationModeMember")
375
- if mode_member and mode_member in params:
376
- if supported_algorithms is None:
377
- supported_algorithms = _SUPPORTED_CHECKSUM_ALGORITHMS
378
- response_algorithms = {
379
- a.lower() for a in http_checksum.get("responseAlgorithms", [])
380
- }
381
-
382
- usable_algorithms = []
383
- for algorithm in _ALGORITHMS_PRIORITY_LIST:
384
- if algorithm not in response_algorithms:
385
- continue
386
- if algorithm in supported_algorithms:
387
- usable_algorithms.append(algorithm)
388
-
389
- checksum_context = request["context"].get("checksum", {})
390
- checksum_context["response_algorithms"] = usable_algorithms
391
- request["context"]["checksum"] = checksum_context
392
-
393
-
394
- def handle_checksum_body(http_response, response, context, operation_model):
395
- headers = response["headers"]
396
- checksum_context = context.get("checksum", {})
397
- algorithms = checksum_context.get("response_algorithms")
398
-
399
- if not algorithms:
400
- return
401
-
402
- for algorithm in algorithms:
403
- header_name = "x-amz-checksum-%s" % algorithm
404
- # If the header is not found, check the next algorithm
405
- if header_name not in headers:
406
- continue
407
-
408
- # If a - is in the checksum this is not valid Base64. S3 returns
409
- # checksums that include a -# suffix to indicate a checksum derived
410
- # from the hash of all part checksums. We cannot wrap this response
411
- if "-" in headers[header_name]:
412
- continue
413
-
414
- if operation_model.has_streaming_output:
415
- response["body"] = _handle_streaming_response(
416
- http_response, response, algorithm
417
- )
418
- else:
419
- response["body"] = _handle_bytes_response(
420
- http_response, response, algorithm
421
- )
422
-
423
- # Expose metadata that the checksum check actually occurred
424
- checksum_context = response["context"].get("checksum", {})
425
- checksum_context["response_algorithm"] = algorithm
426
- response["context"]["checksum"] = checksum_context
427
- return
428
-
429
- logger.info(
430
- f'Skipping checksum validation. Response did not contain one of the '
431
- f'following algorithms: {algorithms}.'
432
- )
433
-
434
-
435
- def _handle_streaming_response(http_response, response, algorithm):
436
- checksum_cls = _CHECKSUM_CLS.get(algorithm)
437
- header_name = "x-amz-checksum-%s" % algorithm
438
- return StreamingChecksumBody(
439
- http_response.raw,
440
- response["headers"].get("content-length"),
441
- checksum_cls(),
442
- response["headers"][header_name],
443
- )
444
-
445
-
446
- def _handle_bytes_response(http_response, response, algorithm):
447
- body = http_response.content
448
- header_name = "x-amz-checksum-%s" % algorithm
449
- checksum_cls = _CHECKSUM_CLS.get(algorithm)
450
- checksum = checksum_cls()
451
- checksum.update(body)
452
- expected = response["headers"][header_name]
453
- if checksum.digest() != base64.b64decode(expected):
454
- error_msg = (
455
- "Expected checksum %s did not match calculated checksum: %s"
456
- % (
457
- expected,
458
- checksum.b64digest(),
459
- )
460
- )
461
- raise FlexibleChecksumError(error_msg=error_msg)
462
- return body
463
-
464
-
465
- _CHECKSUM_CLS = {
466
- "crc32": Crc32Checksum,
467
- "sha1": Sha1Checksum,
468
- "sha256": Sha256Checksum,
469
- }
470
- _CRT_CHECKSUM_ALGORITHMS = ["crc32", "crc32c"]
471
- if HAS_CRT:
472
- # Use CRT checksum implementations if available
473
- _CRT_CHECKSUM_CLS = {
474
- "crc32": CrtCrc32Checksum,
475
- "crc32c": CrtCrc32cChecksum,
476
- }
477
- _CHECKSUM_CLS.update(_CRT_CHECKSUM_CLS)
478
- # Validate this list isn't out of sync with _CRT_CHECKSUM_CLS keys
479
- assert all(
480
- name in _CRT_CHECKSUM_ALGORITHMS for name in _CRT_CHECKSUM_CLS.keys()
481
- )
482
- _SUPPORTED_CHECKSUM_ALGORITHMS = list(_CHECKSUM_CLS.keys())
483
- _ALGORITHMS_PRIORITY_LIST = ['crc32c', 'crc32', 'sha1', 'sha256']
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/monitoring.py DELETED
@@ -1,586 +0,0 @@
1
- # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # http://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
- import json
14
- import logging
15
- import re
16
- import time
17
-
18
- from botocore.compat import ensure_bytes, ensure_unicode, urlparse
19
- from botocore.retryhandler import EXCEPTION_MAP as RETRYABLE_EXCEPTIONS
20
-
21
- logger = logging.getLogger(__name__)
22
-
23
-
24
- class Monitor:
25
- _EVENTS_TO_REGISTER = [
26
- 'before-parameter-build',
27
- 'request-created',
28
- 'response-received',
29
- 'after-call',
30
- 'after-call-error',
31
- ]
32
-
33
- def __init__(self, adapter, publisher):
34
- """Abstraction for monitoring clients API calls
35
-
36
- :param adapter: An adapter that takes event emitter events
37
- and produces monitor events
38
-
39
- :param publisher: A publisher for generated monitor events
40
- """
41
- self._adapter = adapter
42
- self._publisher = publisher
43
-
44
- def register(self, event_emitter):
45
- """Register an event emitter to the monitor"""
46
- for event_to_register in self._EVENTS_TO_REGISTER:
47
- event_emitter.register_last(event_to_register, self.capture)
48
-
49
- def capture(self, event_name, **payload):
50
- """Captures an incoming event from the event emitter
51
-
52
- It will feed an event emitter event to the monitor's adaptor to create
53
- a monitor event and then publish that event to the monitor's publisher.
54
- """
55
- try:
56
- monitor_event = self._adapter.feed(event_name, payload)
57
- if monitor_event:
58
- self._publisher.publish(monitor_event)
59
- except Exception as e:
60
- logger.debug(
61
- 'Exception %s raised by client monitor in handling event %s',
62
- e,
63
- event_name,
64
- exc_info=True,
65
- )
66
-
67
-
68
- class MonitorEventAdapter:
69
- def __init__(self, time=time.time):
70
- """Adapts event emitter events to produce monitor events
71
-
72
- :type time: callable
73
- :param time: A callable that produces the current time
74
- """
75
- self._time = time
76
-
77
- def feed(self, emitter_event_name, emitter_payload):
78
- """Feed an event emitter event to generate a monitor event
79
-
80
- :type emitter_event_name: str
81
- :param emitter_event_name: The name of the event emitted
82
-
83
- :type emitter_payload: dict
84
- :param emitter_payload: The payload to associated to the event
85
- emitted
86
-
87
- :rtype: BaseMonitorEvent
88
- :returns: A monitor event based on the event emitter events
89
- fired
90
- """
91
- return self._get_handler(emitter_event_name)(**emitter_payload)
92
-
93
- def _get_handler(self, event_name):
94
- return getattr(
95
- self, '_handle_' + event_name.split('.')[0].replace('-', '_')
96
- )
97
-
98
- def _handle_before_parameter_build(self, model, context, **kwargs):
99
- context['current_api_call_event'] = APICallEvent(
100
- service=model.service_model.service_id,
101
- operation=model.wire_name,
102
- timestamp=self._get_current_time(),
103
- )
104
-
105
- def _handle_request_created(self, request, **kwargs):
106
- context = request.context
107
- new_attempt_event = context[
108
- 'current_api_call_event'
109
- ].new_api_call_attempt(timestamp=self._get_current_time())
110
- new_attempt_event.request_headers = request.headers
111
- new_attempt_event.url = request.url
112
- context['current_api_call_attempt_event'] = new_attempt_event
113
-
114
- def _handle_response_received(
115
- self, parsed_response, context, exception, **kwargs
116
- ):
117
- attempt_event = context.pop('current_api_call_attempt_event')
118
- attempt_event.latency = self._get_latency(attempt_event)
119
- if parsed_response is not None:
120
- attempt_event.http_status_code = parsed_response[
121
- 'ResponseMetadata'
122
- ]['HTTPStatusCode']
123
- attempt_event.response_headers = parsed_response[
124
- 'ResponseMetadata'
125
- ]['HTTPHeaders']
126
- attempt_event.parsed_error = parsed_response.get('Error')
127
- else:
128
- attempt_event.wire_exception = exception
129
- return attempt_event
130
-
131
- def _handle_after_call(self, context, parsed, **kwargs):
132
- context['current_api_call_event'].retries_exceeded = parsed[
133
- 'ResponseMetadata'
134
- ].get('MaxAttemptsReached', False)
135
- return self._complete_api_call(context)
136
-
137
- def _handle_after_call_error(self, context, exception, **kwargs):
138
- # If the after-call-error was emitted and the error being raised
139
- # was a retryable connection error, then the retries must have exceeded
140
- # for that exception as this event gets emitted **after** retries
141
- # happen.
142
- context[
143
- 'current_api_call_event'
144
- ].retries_exceeded = self._is_retryable_exception(exception)
145
- return self._complete_api_call(context)
146
-
147
- def _is_retryable_exception(self, exception):
148
- return isinstance(
149
- exception, tuple(RETRYABLE_EXCEPTIONS['GENERAL_CONNECTION_ERROR'])
150
- )
151
-
152
- def _complete_api_call(self, context):
153
- call_event = context.pop('current_api_call_event')
154
- call_event.latency = self._get_latency(call_event)
155
- return call_event
156
-
157
- def _get_latency(self, event):
158
- return self._get_current_time() - event.timestamp
159
-
160
- def _get_current_time(self):
161
- return int(self._time() * 1000)
162
-
163
-
164
- class BaseMonitorEvent:
165
- def __init__(self, service, operation, timestamp):
166
- """Base monitor event
167
-
168
- :type service: str
169
- :param service: A string identifying the service associated to
170
- the event
171
-
172
- :type operation: str
173
- :param operation: A string identifying the operation of service
174
- associated to the event
175
-
176
- :type timestamp: int
177
- :param timestamp: Epoch time in milliseconds from when the event began
178
- """
179
- self.service = service
180
- self.operation = operation
181
- self.timestamp = timestamp
182
-
183
- def __repr__(self):
184
- return f'{self.__class__.__name__}({self.__dict__!r})'
185
-
186
- def __eq__(self, other):
187
- if isinstance(other, self.__class__):
188
- return self.__dict__ == other.__dict__
189
- return False
190
-
191
-
192
- class APICallEvent(BaseMonitorEvent):
193
- def __init__(
194
- self,
195
- service,
196
- operation,
197
- timestamp,
198
- latency=None,
199
- attempts=None,
200
- retries_exceeded=False,
201
- ):
202
- """Monitor event for a single API call
203
-
204
- This event corresponds to a single client method call, which includes
205
- every HTTP requests attempt made in order to complete the client call
206
-
207
- :type service: str
208
- :param service: A string identifying the service associated to
209
- the event
210
-
211
- :type operation: str
212
- :param operation: A string identifying the operation of service
213
- associated to the event
214
-
215
- :type timestamp: int
216
- :param timestamp: Epoch time in milliseconds from when the event began
217
-
218
- :type latency: int
219
- :param latency: The time in milliseconds to complete the client call
220
-
221
- :type attempts: list
222
- :param attempts: The list of APICallAttempts associated to the
223
- APICall
224
-
225
- :type retries_exceeded: bool
226
- :param retries_exceeded: True if API call exceeded retries. False
227
- otherwise
228
- """
229
- super().__init__(
230
- service=service, operation=operation, timestamp=timestamp
231
- )
232
- self.latency = latency
233
- self.attempts = attempts
234
- if attempts is None:
235
- self.attempts = []
236
- self.retries_exceeded = retries_exceeded
237
-
238
- def new_api_call_attempt(self, timestamp):
239
- """Instantiates APICallAttemptEvent associated to the APICallEvent
240
-
241
- :type timestamp: int
242
- :param timestamp: Epoch time in milliseconds to associate to the
243
- APICallAttemptEvent
244
- """
245
- attempt_event = APICallAttemptEvent(
246
- service=self.service, operation=self.operation, timestamp=timestamp
247
- )
248
- self.attempts.append(attempt_event)
249
- return attempt_event
250
-
251
-
252
- class APICallAttemptEvent(BaseMonitorEvent):
253
- def __init__(
254
- self,
255
- service,
256
- operation,
257
- timestamp,
258
- latency=None,
259
- url=None,
260
- http_status_code=None,
261
- request_headers=None,
262
- response_headers=None,
263
- parsed_error=None,
264
- wire_exception=None,
265
- ):
266
- """Monitor event for a single API call attempt
267
-
268
- This event corresponds to a single HTTP request attempt in completing
269
- the entire client method call.
270
-
271
- :type service: str
272
- :param service: A string identifying the service associated to
273
- the event
274
-
275
- :type operation: str
276
- :param operation: A string identifying the operation of service
277
- associated to the event
278
-
279
- :type timestamp: int
280
- :param timestamp: Epoch time in milliseconds from when the HTTP request
281
- started
282
-
283
- :type latency: int
284
- :param latency: The time in milliseconds to complete the HTTP request
285
- whether it succeeded or failed
286
-
287
- :type url: str
288
- :param url: The URL the attempt was sent to
289
-
290
- :type http_status_code: int
291
- :param http_status_code: The HTTP status code of the HTTP response
292
- if there was a response
293
-
294
- :type request_headers: dict
295
- :param request_headers: The HTTP headers sent in making the HTTP
296
- request
297
-
298
- :type response_headers: dict
299
- :param response_headers: The HTTP headers returned in the HTTP response
300
- if there was a response
301
-
302
- :type parsed_error: dict
303
- :param parsed_error: The error parsed if the service returned an
304
- error back
305
-
306
- :type wire_exception: Exception
307
- :param wire_exception: The exception raised in sending the HTTP
308
- request (i.e. ConnectionError)
309
- """
310
- super().__init__(
311
- service=service, operation=operation, timestamp=timestamp
312
- )
313
- self.latency = latency
314
- self.url = url
315
- self.http_status_code = http_status_code
316
- self.request_headers = request_headers
317
- self.response_headers = response_headers
318
- self.parsed_error = parsed_error
319
- self.wire_exception = wire_exception
320
-
321
-
322
- class CSMSerializer:
323
- _MAX_CLIENT_ID_LENGTH = 255
324
- _MAX_EXCEPTION_CLASS_LENGTH = 128
325
- _MAX_ERROR_CODE_LENGTH = 128
326
- _MAX_USER_AGENT_LENGTH = 256
327
- _MAX_MESSAGE_LENGTH = 512
328
- _RESPONSE_HEADERS_TO_EVENT_ENTRIES = {
329
- 'x-amzn-requestid': 'XAmznRequestId',
330
- 'x-amz-request-id': 'XAmzRequestId',
331
- 'x-amz-id-2': 'XAmzId2',
332
- }
333
- _AUTH_REGEXS = {
334
- 'v4': re.compile(
335
- r'AWS4-HMAC-SHA256 '
336
- r'Credential=(?P<access_key>\w+)/\d+/'
337
- r'(?P<signing_region>[a-z0-9-]+)/'
338
- ),
339
- 's3': re.compile(r'AWS (?P<access_key>\w+):'),
340
- }
341
- _SERIALIZEABLE_EVENT_PROPERTIES = [
342
- 'service',
343
- 'operation',
344
- 'timestamp',
345
- 'attempts',
346
- 'latency',
347
- 'retries_exceeded',
348
- 'url',
349
- 'request_headers',
350
- 'http_status_code',
351
- 'response_headers',
352
- 'parsed_error',
353
- 'wire_exception',
354
- ]
355
-
356
- def __init__(self, csm_client_id):
357
- """Serializes monitor events to CSM (Client Side Monitoring) format
358
-
359
- :type csm_client_id: str
360
- :param csm_client_id: The application identifier to associate
361
- to the serialized events
362
- """
363
- self._validate_client_id(csm_client_id)
364
- self.csm_client_id = csm_client_id
365
-
366
- def _validate_client_id(self, csm_client_id):
367
- if len(csm_client_id) > self._MAX_CLIENT_ID_LENGTH:
368
- raise ValueError(
369
- f'The value provided for csm_client_id: {csm_client_id} exceeds '
370
- f'the maximum length of {self._MAX_CLIENT_ID_LENGTH} characters'
371
- )
372
-
373
- def serialize(self, event):
374
- """Serializes a monitor event to the CSM format
375
-
376
- :type event: BaseMonitorEvent
377
- :param event: The event to serialize to bytes
378
-
379
- :rtype: bytes
380
- :returns: The CSM serialized form of the event
381
- """
382
- event_dict = self._get_base_event_dict(event)
383
- event_type = self._get_event_type(event)
384
- event_dict['Type'] = event_type
385
- for attr in self._SERIALIZEABLE_EVENT_PROPERTIES:
386
- value = getattr(event, attr, None)
387
- if value is not None:
388
- getattr(self, '_serialize_' + attr)(
389
- value, event_dict, event_type=event_type
390
- )
391
- return ensure_bytes(json.dumps(event_dict, separators=(',', ':')))
392
-
393
- def _get_base_event_dict(self, event):
394
- return {
395
- 'Version': 1,
396
- 'ClientId': self.csm_client_id,
397
- }
398
-
399
- def _serialize_service(self, service, event_dict, **kwargs):
400
- event_dict['Service'] = service
401
-
402
- def _serialize_operation(self, operation, event_dict, **kwargs):
403
- event_dict['Api'] = operation
404
-
405
- def _serialize_timestamp(self, timestamp, event_dict, **kwargs):
406
- event_dict['Timestamp'] = timestamp
407
-
408
- def _serialize_attempts(self, attempts, event_dict, **kwargs):
409
- event_dict['AttemptCount'] = len(attempts)
410
- if attempts:
411
- self._add_fields_from_last_attempt(event_dict, attempts[-1])
412
-
413
- def _add_fields_from_last_attempt(self, event_dict, last_attempt):
414
-         if last_attempt.request_headers:
-             # It does not matter which attempt to use to grab the region
-             # for the ApiCall event, but SDKs typically do the last one.
-             region = self._get_region(last_attempt.request_headers)
-             if region is not None:
-                 event_dict['Region'] = region
-             event_dict['UserAgent'] = self._get_user_agent(
-                 last_attempt.request_headers
-             )
-         if last_attempt.http_status_code is not None:
-             event_dict['FinalHttpStatusCode'] = last_attempt.http_status_code
-         if last_attempt.parsed_error is not None:
-             self._serialize_parsed_error(
-                 last_attempt.parsed_error, event_dict, 'ApiCall'
-             )
-         if last_attempt.wire_exception is not None:
-             self._serialize_wire_exception(
-                 last_attempt.wire_exception, event_dict, 'ApiCall'
-             )
-
-     def _serialize_latency(self, latency, event_dict, event_type):
-         if event_type == 'ApiCall':
-             event_dict['Latency'] = latency
-         elif event_type == 'ApiCallAttempt':
-             event_dict['AttemptLatency'] = latency
-
-     def _serialize_retries_exceeded(
-         self, retries_exceeded, event_dict, **kwargs
-     ):
-         event_dict['MaxRetriesExceeded'] = 1 if retries_exceeded else 0
-
-     def _serialize_url(self, url, event_dict, **kwargs):
-         event_dict['Fqdn'] = urlparse(url).netloc
-
-     def _serialize_request_headers(
-         self, request_headers, event_dict, **kwargs
-     ):
-         event_dict['UserAgent'] = self._get_user_agent(request_headers)
-         if self._is_signed(request_headers):
-             event_dict['AccessKey'] = self._get_access_key(request_headers)
-         region = self._get_region(request_headers)
-         if region is not None:
-             event_dict['Region'] = region
-         if 'X-Amz-Security-Token' in request_headers:
-             event_dict['SessionToken'] = request_headers[
-                 'X-Amz-Security-Token'
-             ]
-
-     def _serialize_http_status_code(
-         self, http_status_code, event_dict, **kwargs
-     ):
-         event_dict['HttpStatusCode'] = http_status_code
-
-     def _serialize_response_headers(
-         self, response_headers, event_dict, **kwargs
-     ):
-         for header, entry in self._RESPONSE_HEADERS_TO_EVENT_ENTRIES.items():
-             if header in response_headers:
-                 event_dict[entry] = response_headers[header]
-
-     def _serialize_parsed_error(
-         self, parsed_error, event_dict, event_type, **kwargs
-     ):
-         field_prefix = 'Final' if event_type == 'ApiCall' else ''
-         event_dict[field_prefix + 'AwsException'] = self._truncate(
-             parsed_error['Code'], self._MAX_ERROR_CODE_LENGTH
-         )
-         event_dict[field_prefix + 'AwsExceptionMessage'] = self._truncate(
-             parsed_error['Message'], self._MAX_MESSAGE_LENGTH
-         )
-
-     def _serialize_wire_exception(
-         self, wire_exception, event_dict, event_type, **kwargs
-     ):
-         field_prefix = 'Final' if event_type == 'ApiCall' else ''
-         event_dict[field_prefix + 'SdkException'] = self._truncate(
-             wire_exception.__class__.__name__, self._MAX_EXCEPTION_CLASS_LENGTH
-         )
-         event_dict[field_prefix + 'SdkExceptionMessage'] = self._truncate(
-             str(wire_exception), self._MAX_MESSAGE_LENGTH
-         )
-
-     def _get_event_type(self, event):
-         if isinstance(event, APICallEvent):
-             return 'ApiCall'
-         elif isinstance(event, APICallAttemptEvent):
-             return 'ApiCallAttempt'
-
-     def _get_access_key(self, request_headers):
-         auth_val = self._get_auth_value(request_headers)
-         _, auth_match = self._get_auth_match(auth_val)
-         return auth_match.group('access_key')
-
-     def _get_region(self, request_headers):
-         if not self._is_signed(request_headers):
-             return None
-         auth_val = self._get_auth_value(request_headers)
-         signature_version, auth_match = self._get_auth_match(auth_val)
-         if signature_version != 'v4':
-             return None
-         return auth_match.group('signing_region')
-
-     def _get_user_agent(self, request_headers):
-         return self._truncate(
-             ensure_unicode(request_headers.get('User-Agent', '')),
-             self._MAX_USER_AGENT_LENGTH,
-         )
-
-     def _is_signed(self, request_headers):
-         return 'Authorization' in request_headers
-
-     def _get_auth_value(self, request_headers):
-         return ensure_unicode(request_headers['Authorization'])
-
-     def _get_auth_match(self, auth_val):
-         for signature_version, regex in self._AUTH_REGEXS.items():
-             match = regex.match(auth_val)
-             if match:
-                 return signature_version, match
-         return None, None
-
-     def _truncate(self, text, max_length):
-         if len(text) > max_length:
-             logger.debug(
-                 'Truncating following value to maximum length of %s: %s',
-                 max_length,
-                 text,
-             )
-             return text[:max_length]
-         return text
-
-
- class SocketPublisher:
-     _MAX_MONITOR_EVENT_LENGTH = 8 * 1024
-
-     def __init__(self, socket, host, port, serializer):
-         """Publishes monitor events to a socket
-
-         :type socket: socket.socket
-         :param socket: The socket object to use to publish events
-
-         :type host: string
-         :param host: The host to send events to
-
-         :type port: integer
-         :param port: The port on the host to send events to
-
-         :param serializer: The serializer to use to serialize the event
-             to a form that can be published to the socket. This must
-             have a `serialize()` method that accepts a monitor event
-             and returns bytes.
-         """
-         self._socket = socket
-         self._address = (host, port)
-         self._serializer = serializer
-
-     def publish(self, event):
-         """Publishes a specified monitor event
-
-         :type event: BaseMonitorEvent
-         :param event: The monitor event to be sent
-             over the publisher's socket to the desired address.
-         """
-         serialized_event = self._serializer.serialize(event)
-         if len(serialized_event) > self._MAX_MONITOR_EVENT_LENGTH:
-             logger.debug(
-                 'Serialized event of size %s exceeds the maximum length '
-                 'allowed: %s. Not sending event to socket.',
-                 len(serialized_event),
-                 self._MAX_MONITOR_EVENT_LENGTH,
-             )
-             return
-         self._socket.sendto(serialized_event, self._address)
 
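For orientation, a minimal sketch of how a publisher like the `SocketPublisher` shown above can be driven. The `JsonSerializer` and `FakeEvent` classes below are hypothetical stand-ins (the real serializer and event types live elsewhere in botocore); only the constructor signature, the `serialize()` contract, and the UDP `sendto` behaviour mirror the deleted code, and the port value is an assumption.

```python
import json
import socket


class JsonSerializer:
    """Hypothetical serializer: only needs serialize(event) -> bytes."""

    def serialize(self, event):
        return json.dumps(event.__dict__).encode('utf-8')


class FakeEvent:
    """Hypothetical monitor event carrying a couple of fields."""

    def __init__(self, service, operation):
        self.Service = service
        self.Api = operation


# UDP is connectionless, so publishing works even with no listener running.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
publisher = SocketPublisher(sock, '127.0.0.1', 31000, JsonSerializer())
publisher.publish(FakeEvent('s3', 'ListBuckets'))
```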
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/__init__.py DELETED
@@ -1,25 +0,0 @@
- # This file is dual licensed under the terms of the Apache License, Version
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
- # for complete details.
-
- from .__about__ import (
-     __author__,
-     __copyright__,
-     __email__,
-     __license__,
-     __summary__,
-     __title__,
-     __uri__,
-     __version__,
- )
-
- __all__ = [
-     "__title__",
-     "__summary__",
-     "__uri__",
-     "__version__",
-     "__author__",
-     "__email__",
-     "__license__",
-     "__copyright__",
- ]
 
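The deleted `__init__.py` above only re-exports package metadata from `__about__`. A small sketch of the kind of access it enables, assuming the vendored copy inside pip shown in the path above (the printed version depends on whichever copy is installed):

```python
# Read the re-exported metadata from the vendored packaging module.
from pip._vendor.packaging import __title__, __version__

print(__title__, __version__)  # e.g. "packaging 21.3" for that vintage of pip
```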
spaces/Boilin/URetinex-Net/test.py DELETED
@@ -1,200 +0,0 @@
- import argparse
- import torch
- import torch.nn as nn
- import numpy as np  # needed for np.uint8 in runForWeb below
- from network.Math_Module import P, Q
- from network.decom import Decom
- import os
- #import torchvision
- import torchvision.transforms as transforms
- from PIL import Image
- import time
- from utils import *
- import cv2
-
- def one2three(x):
-     return torch.cat([x, x, x], dim=1).to(x)
-
-
- class Inference(nn.Module):
-     def __init__(self, opts):
-         super().__init__()
-         self.opts = opts
-         # loading decomposition model
-         self.model_Decom_low = Decom()
-         self.model_Decom_low = load_initialize(self.model_Decom_low,
-                                                self.opts.Decom_model_low_path)
-         # loading R; old_model_opts; and L model
-         self.unfolding_opts, self.model_R, self.model_L = load_unfolding(
-             self.opts.unfolding_model_path)
-         # loading adjustment model
-         self.adjust_model = load_adjustment(self.opts.adjust_model_path)
-         self.P = P()
-         self.Q = Q()
-         transform = [
-             transforms.ToTensor(),
-         ]
-         self.transform = transforms.Compose(transform)
-         print(self.model_Decom_low)
-         print(self.model_R)
-         print(self.model_L)
-         print(self.adjust_model)
-         #time.sleep(8)
-
-     def unfolding(self, input_low_img):
-         for t in range(self.unfolding_opts.round):
-             if t == 0:  # initialize R0, L0
-                 P, Q = self.model_Decom_low(input_low_img)
-             else:  # update P and Q
-                 w_p = (self.unfolding_opts.gamma +
-                        self.unfolding_opts.Roffset * t)
-                 w_q = (self.unfolding_opts.lamda +
-                        self.unfolding_opts.Loffset * t)
-                 P = self.P(I=input_low_img, Q=Q, R=R, gamma=w_p)
-                 Q = self.Q(I=input_low_img, P=P, L=L, lamda=w_q)
-             R = self.model_R(r=P, l=Q)
-             L = self.model_L(l=Q)
-         return R, L
-
-     def lllumination_adjust(self, L, ratio):
-         ratio = torch.ones(L.shape) * self.opts.ratio
-         return self.adjust_model(l=L, alpha=ratio)
-
-     def forward(self, input_low_img):
-         # if not torch.cuda.is_available():
-         #     input_low_img = input_low_img.cuda()
-         with torch.no_grad():
-             start = time.time()
-             R, L = self.unfolding(input_low_img)
-             High_L = self.lllumination_adjust(L, self.opts.ratio)
-             I_enhance = High_L * R
-             p_time = (time.time() - start)
-         return I_enhance, p_time
-
-     def run(self, low_img_path):
-         file_name = os.path.basename(self.opts.img_path)
-         name = file_name.split('.')[0]
-         low_img = self.transform(Image.open(low_img_path)).unsqueeze(0)
-
-         # print('**************************************************************')
-         # print(low_img)
-         # print(type(low_img))
-         # print(type(Image.open(low_img_path)))
-         # print(Image.open(low_img_path))
-
-         enhance, p_time = self.forward(input_low_img=low_img)
-         if not os.path.exists(self.opts.output):
-             os.makedirs(self.opts.output)
-         save_path = os.path.join(
-             self.opts.output,
-             file_name.replace(name,
-                               "%s_%d_URetinexNet" % (name, self.opts.ratio)))
-         np_save_TensorImg(enhance, save_path)
-         print(
-             "================================= time for %s: %f============================"
-             % (file_name, p_time))
-
-
-     # This is a modified version of run()
-     # that avoids writing the image to disk;
-     # saving to disk could be added back later if needed.
-     def runForWeb(self, image):
-         # First, downsample the input image until it fits the pixel budget for inference
-         max_pixel_limit = 600 * 600
-         pyr_down_times = 0
-         while True:
-             a = len(image)
-             b = len(image[0])
-             c = a * b
-             if c <= max_pixel_limit:
-                 break
-             pyr_down_times += 1
-             image = cv2.pyrDown(image)
-
-         print(image.shape)
-         # input
-         low_img = self.transform(Image.fromarray(np.uint8(image))).unsqueeze(0)
-
-         # low_img=Image.fromarray(image.astype('uint8')).convert('RGB')
-         # print('#############################################')
-         # print(type(low_img))
-         # print(low_img)
-
-         # run the model
-         enhance, p_time = self.forward(input_low_img=low_img)
-
-         # print('UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU')
-
-         # output
-         # The result-conversion helper in utils.py needs a small change here;
-         # see where np_save_TensorImg is used in run() above for the spot to modify.
-         # Upsample the result to restore the original image size.
-         result_image = result_for_gradio(enhance)
-         for i in range(pyr_down_times):
-             result_image = cv2.pyrUp(result_image)
-         # return result_for_gradio(enhance)
-         print(result_image.shape)
-         return result_image
-
-
- # This is the entry point called by the Gradio framework;
- # Gradio provides the backend control and the front-end page display.
- def functionForGradio(image):
-     parser = argparse.ArgumentParser(description='Configure')
-     # specify your data path here!
-     parser.add_argument('--img_path', type=str, default="./demo/input/3.png")
-     parser.add_argument('--output', type=str, default="./demo/output")
-     # ratio is recommended to be 3-5; a bigger ratio will lead to over-exposure
-     parser.add_argument('--ratio', type=int, default=5)
-     # model path
-     parser.add_argument('--Decom_model_low_path',
-                         type=str,
-                         default="./ckpt/init_low.pth")
-     parser.add_argument('--unfolding_model_path',
-                         type=str,
-                         default="./ckpt/unfolding.pth")
-     parser.add_argument('--adjust_model_path',
-                         type=str,
-                         default="./ckpt/L_adjust.pth")
-     parser.add_argument('--gpu_id', type=int, default=0)
-
-     opts = parser.parse_args()
-     for k, v in vars(opts).items():
-         print(k, v)
-
-     os.environ['CUDA_VISIBLE_DEVICES'] = str(opts.gpu_id)
-     model = Inference(opts)
-
-     # Pass the numpy array in and run the model
-     return model.runForWeb(image)
-
-
- # This is the algorithm's original main entry point; the Gradio interface
- # above was adapted from it.
-
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser(description='Configure')
-     # specify your data path here!
-     parser.add_argument('--img_path', type=str, default="./demo/input/test3.jpg")
-     parser.add_argument('--output', type=str, default="./demo/output")
-     # ratio is recommended to be 3-5; a bigger ratio will lead to over-exposure
-     parser.add_argument('--ratio', type=int, default=5)
-     # model path
-     parser.add_argument('--Decom_model_low_path',
-                         type=str,
-                         default="./ckpt/init_low.pth")
-     parser.add_argument('--unfolding_model_path',
-                         type=str,
-                         default="./ckpt/unfolding.pth")
-     parser.add_argument('--adjust_model_path',
-                         type=str,
-                         default="./ckpt/L_adjust.pth")
-     parser.add_argument('--gpu_id', type=int, default=0)
-
-     opts = parser.parse_args()
-     for k, v in vars(opts).items():
-         print(k, v)
-
-     os.environ['CUDA_VISIBLE_DEVICES'] = str(opts.gpu_id)
-     model = Inference(opts)
-     model.run(opts.img_path)
 
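A standalone sketch of the resize strategy used in `runForWeb` above: halve the image with `cv2.pyrDown` until it fits a pixel budget, run the model on the small image, then undo the halving with `cv2.pyrUp`. The budget mirrors the 600*600 constant in the deleted code; the input here is synthetic.

```python
import cv2
import numpy as np

MAX_PIXELS = 600 * 600  # same budget as in runForWeb above

img = np.random.randint(0, 255, (1920, 1080, 3), dtype=np.uint8)  # synthetic input

down_times = 0
while img.shape[0] * img.shape[1] > MAX_PIXELS:
    img = cv2.pyrDown(img)   # halves each spatial dimension (Gaussian pyramid)
    down_times += 1

# ... run the enhancement model on the small image here ...

out = img
for _ in range(down_times):
    out = cv2.pyrUp(out)     # roughly restores the original resolution

print(down_times, out.shape)
```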
spaces/CVPR/BigDL-Nano_inference/app.py DELETED
@@ -1,193 +0,0 @@
1
- #
2
- # Copyright 2016 The BigDL Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- #
16
- # Part of the code in this file is adapted from
17
- # https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/eval.py and
18
- # https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/train.py
19
-
20
- # MIT License
21
-
22
- # Copyright (c) 2022 Lorenzo Breschi
23
-
24
- # Permission is hereby granted, free of charge, to any person obtaining a copy
25
- # of this software and associated documentation files (the "Software"), to deal
26
- # in the Software without restriction, including without limitation the rights
27
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
28
- # copies of the Software, and to permit persons to whom the Software is
29
- # furnished to do so, subject to the following conditions:
30
-
31
- # The above copyright notice and this permission notice shall be included in all
32
- # copies or substantial portions of the Software.
33
-
34
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
37
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
38
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
39
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
40
- # SOFTWARE.
41
-
42
- import gradio as gr
43
- import numpy as np
44
- import time
45
- from data import write_image_tensor, PatchDataModule, prepare_data, image2tensor, tensor2image
46
- import torch
47
- from tqdm import tqdm
48
- from bigdl.nano.pytorch import InferenceOptimizer
49
- from torch.utils.data import DataLoader
50
- from pathlib import Path
51
- from torch.utils.data import Dataset
52
- import datetime
53
- import huggingface_hub
54
-
55
-
56
- device = 'cpu'
57
- dtype = torch.float32
58
- MODEL_REPO = 'CVPR/FSPBT'
59
- ckpt_path = huggingface_hub.hf_hub_download(
60
- MODEL_REPO, 'generator.pt')
61
- generator = torch.load(ckpt_path)
62
- generator.eval()
63
- generator.to(device, dtype)
64
- params = {'batch_size': 1,
65
- 'num_workers': 0}
66
-
67
-
68
- class ImageDataset(Dataset):
69
- def __init__(self, img):
70
- self.imgs = [image2tensor(img)]
71
- def __getitem__(self, idx: int) -> dict:
72
- return self.imgs[idx]
73
-
74
- def __len__(self) -> int:
75
- return len(self.imgs)
76
-
77
-
78
- data_path = Path('data')
79
- train_image_dd = prepare_data(data_path)
80
- dm = PatchDataModule(train_image_dd, patch_size=2**6,
81
- batch_size=2**3, patch_num=2**6)
82
-
83
- # quantize model
84
- train_loader = dm.train_dataloader()
85
- train_loader_iter = iter(train_loader)
86
- quantized_model = InferenceOptimizer.quantize(generator,
87
- accelerator=None,
88
- calib_dataloader=train_loader)
89
-
90
-
91
- def original_transfer(input_img):
92
- w, h, _ = input_img.shape
93
- print(datetime.datetime.now())
94
- print("input size: ", w, h)
95
- # resize too large image
96
- if w > 3000 or h > 3000:
97
- ratio = min(3000 / w, 3000 / h)
98
- w = int(w * ratio)
99
- h = int(h * ratio)
100
- if w % 4 != 0 or h % 4 != 0:
101
- NW = int((w // 4) * 4)
102
- NH = int((h // 4) * 4)
103
- input_img = np.resize(input_img,(NW,NH,3))
104
- st = time.perf_counter()
105
- dataset = ImageDataset(input_img)
106
- loader = DataLoader(dataset, **params)
107
- with torch.no_grad():
108
- for inputs in tqdm(loader):
109
- inputs = inputs.to(device, dtype)
110
- st = time.perf_counter()
111
- outputs = generator(inputs)
112
- ori_time = time.perf_counter() - st
113
- ori_time = "{:.3f}s".format(ori_time)
114
- ori_image = np.array(tensor2image(outputs[0]))
115
- del inputs
116
- del outputs
117
- return ori_image, ori_time
118
-
119
- def nano_transfer(input_img):
120
- w, h, _ = input_img.shape
121
- print(datetime.datetime.now())
122
- print("input size: ", w, h)
123
- # resize too large image
124
- if w > 3000 or h > 3000:
125
- ratio = min(3000 / w, 3000 / h)
126
- w = int(w * ratio)
127
- h = int(h * ratio)
128
- if w % 4 != 0 or h % 4 != 0:
129
- NW = int((w // 4) * 4)
130
- NH = int((h // 4) * 4)
131
- input_img = np.resize(input_img,(NW,NH,3))
132
- st = time.perf_counter()
133
- dataset = ImageDataset(input_img)
134
- loader = DataLoader(dataset, **params)
135
- with torch.no_grad():
136
- for inputs in tqdm(loader):
137
- inputs = inputs.to(device, dtype)
138
- st = time.perf_counter()
139
- outputs = quantized_model(inputs)
140
- nano_time = time.perf_counter() - st
141
- nano_time = "{:.3f}s".format(nano_time)
142
- nano_image = np.array(tensor2image(outputs[0]))
143
- del inputs
144
- del outputs
145
- return nano_image, nano_time
146
-
147
-
148
- def clear():
149
- return None, None, None, None
150
-
151
-
152
- demo = gr.Blocks()
153
-
154
- with demo:
155
- gr.Markdown("<h1><center>BigDL-Nano inference demo</center></h1>")
156
- with gr.Row().style(equal_height=False):
157
- with gr.Column():
158
- gr.Markdown('''
159
- <h2>Overview</h2>
160
-
161
- BigDL-Nano is a library in [BigDL 2.0](https://github.com/intel-analytics/BigDL) that allows the users to transparently accelerate their deep learning pipelines (including data processing, training and inference) by automatically integrating optimized libraries, best-known configurations, and software optimizations. </p>
162
-
163
- The video on the right shows how the user can easily enable quantization using BigDL-Nano (with just a couple of lines of code); you may refer to our [CVPR 2022 demo paper](https://arxiv.org/abs/2204.01715) for more details.
164
- ''')
165
- with gr.Column():
166
- gr.Video(value="data/nano_quantize_api.mp4")
167
- gr.Markdown('''
168
- <h2>Demo</h2>
169
-
170
- This section uses an image stylization example to demostrate the speedup of the above code when using quantization in BigDL-Nano (about 2~3x inference time speedup).
171
- The demo is adapted from the original [FSPBT-Image-Translation code](https://github.com/rnwzd/FSPBT-Image-Translation),
172
- and the default image is from [the COCO dataset](https://cocodataset.org/#home).
173
- ''')
174
- with gr.Row().style(equal_height=False):
175
- input_img = gr.Image(label="input image", value="data/COCO_image.jpg", source="upload")
176
- with gr.Column():
177
- ori_but = gr.Button("Standard PyTorch")
178
- nano_but = gr.Button("BigDL-Nano")
179
- clear_but = gr.Button("Clear Output")
180
- with gr.Row().style(equal_height=False):
181
- with gr.Column():
182
- ori_time = gr.Text(label="Standard PyTorch latency")
183
- ori_image = gr.Image(label="Standard PyTorch output image")
184
- with gr.Column():
185
- nano_time = gr.Text(label="BigDL-Nano latency")
186
- nano_image = gr.Image(label="BigDL-Nano output image")
187
-
188
- ori_but.click(original_transfer, inputs=input_img, outputs=[ori_image, ori_time])
189
- nano_but.click(nano_transfer, inputs=input_img, outputs=[nano_image, nano_time])
190
- clear_but.click(clear, inputs=None, outputs=[ori_image, ori_time, nano_image, nano_time])
191
-
192
-
193
- demo.launch(share=True, enable_queue=True)
 
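A condensed sketch of the quantization step the deleted app relies on, assuming BigDL-Nano is installed. The model and calibration data below are placeholders (the real app loads a pretrained image-translation generator from the Hub); only the `InferenceOptimizer.quantize(...)` call pattern mirrors the deleted code.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from bigdl.nano.pytorch import InferenceOptimizer

# Placeholder model and calibration loader (hypothetical).
model = torch.nn.Sequential(torch.nn.Conv2d(3, 3, 3, padding=1))
calib_loader = DataLoader(
    TensorDataset(torch.rand(8, 3, 64, 64), torch.rand(8, 3, 64, 64)),
    batch_size=2)

# Same call pattern as in the deleted app.py: post-training quantization
# calibrated on a small dataloader, with no extra accelerator backend.
q_model = InferenceOptimizer.quantize(model,
                                      accelerator=None,
                                      calib_dataloader=calib_loader)

with torch.no_grad():
    out = q_model(torch.rand(1, 3, 64, 64))
print(out.shape)
```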
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/backbone/fpn.py DELETED
@@ -1,245 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import math
3
- import fvcore.nn.weight_init as weight_init
4
- import torch.nn.functional as F
5
- from torch import nn
6
-
7
- from detectron2.layers import Conv2d, ShapeSpec, get_norm
8
-
9
- from .backbone import Backbone
10
- from .build import BACKBONE_REGISTRY
11
- from .resnet import build_resnet_backbone
12
-
13
- __all__ = ["build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"]
14
-
15
-
16
- class FPN(Backbone):
17
- """
18
- This module implements Feature Pyramid Network.
19
- It creates pyramid features built on top of some input feature maps.
20
- """
21
-
22
- def __init__(
23
- self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
24
- ):
25
- """
26
- Args:
27
- bottom_up (Backbone): module representing the bottom up subnetwork.
28
- Must be a subclass of :class:`Backbone`. The multi-scale feature
29
- maps generated by the bottom up network, and listed in `in_features`,
30
- are used to generate FPN levels.
31
- in_features (list[str]): names of the input feature maps coming
32
- from the backbone to which FPN is attached. For example, if the
33
- backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
34
- of these may be used; order must be from high to low resolution.
35
- out_channels (int): number of channels in the output feature maps.
36
- norm (str): the normalization to use.
37
- top_block (nn.Module or None): if provided, an extra operation will
38
- be performed on the output of the last (smallest resolution)
39
- FPN output, and the result will extend the result list. The top_block
40
- further downsamples the feature map. It must have an attribute
41
- "num_levels", meaning the number of extra FPN levels added by
42
- this block, and "in_feature", which is a string representing
43
- its input feature (e.g., p5).
44
- fuse_type (str): types for fusing the top down features and the lateral
45
- ones. It can be "sum" (default), which sums up element-wise; or "avg",
46
- which takes the element-wise mean of the two.
47
- """
48
- super(FPN, self).__init__()
49
- assert isinstance(bottom_up, Backbone)
50
-
51
- # Feature map strides and channels from the bottom up network (e.g. ResNet)
52
- input_shapes = bottom_up.output_shape()
53
- in_strides = [input_shapes[f].stride for f in in_features]
54
- in_channels = [input_shapes[f].channels for f in in_features]
55
-
56
- _assert_strides_are_log2_contiguous(in_strides)
57
- lateral_convs = []
58
- output_convs = []
59
-
60
- use_bias = norm == ""
61
- for idx, in_channels in enumerate(in_channels):
62
- lateral_norm = get_norm(norm, out_channels)
63
- output_norm = get_norm(norm, out_channels)
64
-
65
- lateral_conv = Conv2d(
66
- in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
67
- )
68
- output_conv = Conv2d(
69
- out_channels,
70
- out_channels,
71
- kernel_size=3,
72
- stride=1,
73
- padding=1,
74
- bias=use_bias,
75
- norm=output_norm,
76
- )
77
- weight_init.c2_xavier_fill(lateral_conv)
78
- weight_init.c2_xavier_fill(output_conv)
79
- stage = int(math.log2(in_strides[idx]))
80
- self.add_module("fpn_lateral{}".format(stage), lateral_conv)
81
- self.add_module("fpn_output{}".format(stage), output_conv)
82
-
83
- lateral_convs.append(lateral_conv)
84
- output_convs.append(output_conv)
85
- # Place convs into top-down order (from low to high resolution)
86
- # to make the top-down computation in forward clearer.
87
- self.lateral_convs = lateral_convs[::-1]
88
- self.output_convs = output_convs[::-1]
89
- self.top_block = top_block
90
- self.in_features = in_features
91
- self.bottom_up = bottom_up
92
- # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
93
- self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in in_strides}
94
- # top block output feature maps.
95
- if self.top_block is not None:
96
- for s in range(stage, stage + self.top_block.num_levels):
97
- self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
98
-
99
- self._out_features = list(self._out_feature_strides.keys())
100
- self._out_feature_channels = {k: out_channels for k in self._out_features}
101
- self._size_divisibility = in_strides[-1]
102
- assert fuse_type in {"avg", "sum"}
103
- self._fuse_type = fuse_type
104
-
105
- @property
106
- def size_divisibility(self):
107
- return self._size_divisibility
108
-
109
- def forward(self, x):
110
- """
111
- Args:
112
- input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
113
- feature map tensor for each feature level in high to low resolution order.
114
-
115
- Returns:
116
- dict[str->Tensor]:
117
- mapping from feature map name to FPN feature map tensor
118
- in high to low resolution order. Returned feature names follow the FPN
119
- paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
120
- ["p2", "p3", ..., "p6"].
121
- """
122
- # Reverse feature maps into top-down order (from low to high resolution)
123
- bottom_up_features = self.bottom_up(x)
124
- x = [bottom_up_features[f] for f in self.in_features[::-1]]
125
- results = []
126
- prev_features = self.lateral_convs[0](x[0])
127
- results.append(self.output_convs[0](prev_features))
128
- for features, lateral_conv, output_conv in zip(
129
- x[1:], self.lateral_convs[1:], self.output_convs[1:]
130
- ):
131
- top_down_features = F.interpolate(prev_features, scale_factor=2, mode="nearest")
132
- lateral_features = lateral_conv(features)
133
- prev_features = lateral_features + top_down_features
134
- if self._fuse_type == "avg":
135
- prev_features /= 2
136
- results.insert(0, output_conv(prev_features))
137
-
138
- if self.top_block is not None:
139
- top_block_in_feature = bottom_up_features.get(self.top_block.in_feature, None)
140
- if top_block_in_feature is None:
141
- top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
142
- results.extend(self.top_block(top_block_in_feature))
143
- assert len(self._out_features) == len(results)
144
- return dict(zip(self._out_features, results))
145
-
146
- def output_shape(self):
147
- return {
148
- name: ShapeSpec(
149
- channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
150
- )
151
- for name in self._out_features
152
- }
153
-
154
-
155
- def _assert_strides_are_log2_contiguous(strides):
156
- """
157
- Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
158
- """
159
- for i, stride in enumerate(strides[1:], 1):
160
- assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
161
- stride, strides[i - 1]
162
- )
163
-
164
-
165
- class LastLevelMaxPool(nn.Module):
166
- """
167
- This module is used in the original FPN to generate a downsampled
168
- P6 feature from P5.
169
- """
170
-
171
- def __init__(self):
172
- super().__init__()
173
- self.num_levels = 1
174
- self.in_feature = "p5"
175
-
176
- def forward(self, x):
177
- return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
178
-
179
-
180
- class LastLevelP6P7(nn.Module):
181
- """
182
- This module is used in RetinaNet to generate extra layers, P6 and P7 from
183
- C5 feature.
184
- """
185
-
186
- def __init__(self, in_channels, out_channels, in_feature="res5"):
187
- super().__init__()
188
- self.num_levels = 2
189
- self.in_feature = in_feature
190
- self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
191
- self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
192
- for module in [self.p6, self.p7]:
193
- weight_init.c2_xavier_fill(module)
194
-
195
- def forward(self, c5):
196
- p6 = self.p6(c5)
197
- p7 = self.p7(F.relu(p6))
198
- return [p6, p7]
199
-
200
-
201
- @BACKBONE_REGISTRY.register()
202
- def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
203
- """
204
- Args:
205
- cfg: a detectron2 CfgNode
206
-
207
- Returns:
208
- backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
209
- """
210
- bottom_up = build_resnet_backbone(cfg, input_shape)
211
- in_features = cfg.MODEL.FPN.IN_FEATURES
212
- out_channels = cfg.MODEL.FPN.OUT_CHANNELS
213
- backbone = FPN(
214
- bottom_up=bottom_up,
215
- in_features=in_features,
216
- out_channels=out_channels,
217
- norm=cfg.MODEL.FPN.NORM,
218
- top_block=LastLevelMaxPool(),
219
- fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
220
- )
221
- return backbone
222
-
223
-
224
- @BACKBONE_REGISTRY.register()
225
- def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
226
- """
227
- Args:
228
- cfg: a detectron2 CfgNode
229
-
230
- Returns:
231
- backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
232
- """
233
- bottom_up = build_resnet_backbone(cfg, input_shape)
234
- in_features = cfg.MODEL.FPN.IN_FEATURES
235
- out_channels = cfg.MODEL.FPN.OUT_CHANNELS
236
- in_channels_p6p7 = bottom_up.output_shape()["res5"].channels
237
- backbone = FPN(
238
- bottom_up=bottom_up,
239
- in_features=in_features,
240
- out_channels=out_channels,
241
- norm=cfg.MODEL.FPN.NORM,
242
- top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
243
- fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
244
- )
245
- return backbone
 
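The core of the deleted FPN module is its top-down pass: 1x1 lateral convolutions, nearest-neighbour upsampling by a factor of 2, and element-wise sum (or average) fusion. A minimal PyTorch sketch of that pass, independent of detectron2; the channel counts and spatial sizes are illustrative only.

```python
import torch
import torch.nn.functional as F
from torch import nn

in_channels = [256, 512, 1024]   # e.g. res3/res4/res5 outputs (illustrative)
out_channels = 256

lateral = nn.ModuleList(nn.Conv2d(c, out_channels, 1) for c in in_channels)
output = nn.ModuleList(nn.Conv2d(out_channels, out_channels, 3, padding=1)
                       for _ in in_channels)

# Bottom-up features, highest resolution first (strides 8, 16, 32).
feats = [torch.rand(1, c, s, s) for c, s in zip(in_channels, [64, 32, 16])]

# Top-down pass: start from the coarsest map and merge downwards.
results = []
prev = lateral[-1](feats[-1])
results.append(output[-1](prev))
for i in range(len(feats) - 2, -1, -1):
    top_down = F.interpolate(prev, scale_factor=2, mode="nearest")
    prev = lateral[i](feats[i]) + top_down          # "sum" fuse_type
    results.insert(0, output[i](prev))

print([r.shape for r in results])
```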
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/adjacent_difference.h DELETED
@@ -1,540 +0,0 @@
1
- /******************************************************************************
2
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
- *
4
- * Redistribution and use in source and binary forms, with or without
5
- * modification, are permitted provided that the following conditions are met:
6
- * * Redistributions of source code must retain the above copyright
7
- * notice, this list of conditions and the following disclaimer.
8
- * * Redistributions in binary form must reproduce the above copyright
9
- * notice, this list of conditions and the following disclaimer in the
10
- * documentation and/or other materials provided with the distribution.
11
- * * Neither the name of the NVIDIA CORPORATION nor the
12
- * names of its contributors may be used to endorse or promote products
13
- * derived from this software without specific prior written permission.
14
- *
15
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
- *
26
- ******************************************************************************/
27
- #pragma once
28
-
29
-
30
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
31
- #include <thrust/system/cuda/config.h>
32
-
33
- #include <thrust/detail/cstdint.h>
34
- #include <thrust/detail/temporary_array.h>
35
- #include <thrust/system/cuda/detail/util.h>
36
- #include <cub/device/device_select.cuh>
37
- #include <cub/block/block_adjacent_difference.cuh>
38
- #include <thrust/system/cuda/detail/core/agent_launcher.h>
39
- #include <thrust/system/cuda/detail/par_to_seq.h>
40
- #include <thrust/system/cuda/detail/dispatch.h>
41
- #include <thrust/functional.h>
42
- #include <thrust/distance.h>
43
- #include <thrust/detail/mpl/math.h>
44
- #include <thrust/detail/minmax.h>
45
-
46
- namespace thrust
47
- {
48
-
49
- template <typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename BinaryFunction>
50
- __host__ __device__ OutputIterator
51
- adjacent_difference(
52
- const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
53
- InputIterator first,
54
- InputIterator last,
55
- OutputIterator result,
56
- BinaryFunction binary_op);
57
-
58
- namespace cuda_cub {
59
-
60
- namespace __adjacent_difference {
61
-
62
- namespace mpl = thrust::detail::mpl::math;
63
-
64
- template <int _BLOCK_THREADS,
65
- int _ITEMS_PER_THREAD = 1,
66
- cub::BlockLoadAlgorithm _LOAD_ALGORITHM = cub::BLOCK_LOAD_DIRECT,
67
- cub::CacheLoadModifier _LOAD_MODIFIER = cub::LOAD_DEFAULT,
68
- cub::BlockStoreAlgorithm _STORE_ALGORITHM = cub::BLOCK_STORE_DIRECT>
69
- struct PtxPolicy
70
- {
71
- enum
72
- {
73
- BLOCK_THREADS = _BLOCK_THREADS,
74
- ITEMS_PER_THREAD = _ITEMS_PER_THREAD,
75
- ITEMS_PER_TILE = BLOCK_THREADS * ITEMS_PER_THREAD
76
- };
77
-
78
- static const cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM;
79
- static const cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER;
80
- static const cub::BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM;
81
- };
82
-
83
- template<int INPUT_SIZE, int NOMINAL_4B_ITEMS_PER_THREAD>
84
- struct items_per_thread
85
- {
86
- enum
87
- {
88
- value = (INPUT_SIZE <= 8)
89
- ? NOMINAL_4B_ITEMS_PER_THREAD
90
- : mpl::min<
91
- int,
92
- NOMINAL_4B_ITEMS_PER_THREAD,
93
- mpl::max<int,
94
- 1,
95
- ((NOMINAL_4B_ITEMS_PER_THREAD * 8) +
96
- INPUT_SIZE - 1) /
97
- INPUT_SIZE>::value>::value
98
- };
99
- };
100
-
101
- template<class Arch, class T>
102
- struct Tuning;
103
-
104
- template <class T>
105
- struct Tuning<sm30, T>
106
- {
107
- enum
108
- {
109
- INPUT_SIZE = sizeof(T),
110
- NOMINAL_4B_ITEMS_PER_THREAD = 7,
111
- ITEMS_PER_THREAD = items_per_thread<INPUT_SIZE,
112
- NOMINAL_4B_ITEMS_PER_THREAD>::value
113
- };
114
- typedef PtxPolicy<128,
115
- ITEMS_PER_THREAD,
116
- cub::BLOCK_LOAD_WARP_TRANSPOSE,
117
- cub::LOAD_DEFAULT,
118
- cub::BLOCK_STORE_WARP_TRANSPOSE>
119
- type;
120
- };
121
- template <class T>
122
- struct Tuning<sm35, T> : Tuning<sm30,T>
123
- {
124
- enum
125
- {
126
- NOMINAL_4B_ITEMS_PER_THREAD = 7,
127
- ITEMS_PER_THREAD = items_per_thread<Tuning::INPUT_SIZE,
128
- NOMINAL_4B_ITEMS_PER_THREAD>::value
129
- };
130
- typedef PtxPolicy<128,
131
- ITEMS_PER_THREAD,
132
- cub::BLOCK_LOAD_WARP_TRANSPOSE,
133
- cub::LOAD_LDG,
134
- cub::BLOCK_STORE_WARP_TRANSPOSE>
135
- type;
136
- };
137
-
138
- template <class InputIt,
139
- class OutputIt,
140
- class Size,
141
- class BinaryOp>
142
- struct AdjacentDifferenceAgent
143
- {
144
- typedef typename iterator_traits<InputIt>::value_type input_type;
145
-
146
- // XXX output type must be result of BinaryOp(input_type,input_type);
147
- typedef input_type output_type;
148
-
149
- template<class Arch>
150
- struct PtxPlan : Tuning<Arch,input_type>::type
151
- {
152
- typedef Tuning<Arch,input_type> tuning;
153
-
154
- typedef typename core::LoadIterator<PtxPlan, InputIt>::type LoadIt;
155
- typedef typename core::BlockLoad<PtxPlan, LoadIt>::type BlockLoad;
156
-
157
- typedef typename core::BlockStore<PtxPlan, OutputIt, input_type>::type
158
- BlockStore;
159
-
160
- typedef cub::BlockAdjacentDifference<input_type,
161
- PtxPlan::BLOCK_THREADS,
162
- 1,
163
- 1,
164
- Arch::ver>
165
- BlockAdjacentDifference;
166
-
167
- union TempStorage
168
- {
169
- typename BlockAdjacentDifference::TempStorage discontinuity;
170
- typename BlockLoad::TempStorage load;
171
- typename BlockStore::TempStorage store;
172
- }; // union TempStorage
173
- }; // struct PtxPlan
174
-
175
- typedef typename core::specialize_plan_msvc10_war<PtxPlan>::type::type ptx_plan;
176
-
177
- typedef typename ptx_plan::LoadIt LoadIt;
178
- typedef typename ptx_plan::BlockLoad BlockLoad;
179
- typedef typename ptx_plan::BlockStore BlockStore;
180
- typedef typename ptx_plan::BlockAdjacentDifference BlockAdjacentDifference;
181
- typedef typename ptx_plan::TempStorage TempStorage;
182
-
183
-
184
- enum
185
- {
186
- ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD,
187
- BLOCK_THREADS = ptx_plan::BLOCK_THREADS,
188
- ITEMS_PER_TILE = ptx_plan::ITEMS_PER_TILE,
189
- };
190
-
191
- struct impl
192
- {
193
-
194
- //---------------------------------------------------------------------
195
- // Per-thread fields
196
- //---------------------------------------------------------------------
197
-
198
- TempStorage &temp_storage;
199
- LoadIt load_it; // iterator to the first element
200
- input_type * first_tile_previous; // iterator to the first element of previous tile value
201
- OutputIt output_it;
202
- BinaryOp binary_op;
203
-
204
- template <bool IS_LAST_TILE, bool IS_FIRST_TILE>
205
- void THRUST_DEVICE_FUNCTION
206
- consume_tile_impl(int num_remaining,
207
- int tile_idx,
208
- Size tile_base)
209
- {
210
- input_type input[ITEMS_PER_THREAD];
211
- input_type input_prev[ITEMS_PER_THREAD];
212
- output_type output[ITEMS_PER_THREAD];
213
-
214
- if (IS_LAST_TILE)
215
- {
216
- // Fill last elements with the first element
217
- // because collectives are not suffix guarded
218
- BlockLoad(temp_storage.load)
219
- .Load(load_it + tile_base,
220
- input,
221
- num_remaining,
222
- *(load_it + tile_base));
223
- }
224
- else
225
- {
226
- BlockLoad(temp_storage.load).Load(load_it + tile_base, input);
227
- }
228
-
229
-
230
- core::sync_threadblock();
231
-
232
- if (IS_FIRST_TILE)
233
- {
234
- BlockAdjacentDifference(temp_storage.discontinuity)
235
- .FlagHeads(output, input, input_prev, binary_op);
236
- if (threadIdx.x == 0)
237
- output[0] = input[0];
238
- }
239
- else
240
- {
241
- input_type tile_prev_input = first_tile_previous[tile_idx];
242
- BlockAdjacentDifference(temp_storage.discontinuity)
243
- .FlagHeads(output, input, input_prev, binary_op, tile_prev_input);
244
- }
245
-
246
- core::sync_threadblock();
247
-
248
- if (IS_LAST_TILE)
249
- {
250
- BlockStore(temp_storage.store)
251
- .Store(output_it + tile_base, output, num_remaining);
252
- }
253
- else
254
- {
255
- BlockStore(temp_storage.store).Store(output_it + tile_base, output);
256
- }
257
- }
258
-
259
-
260
- template <bool IS_LAST_TILE>
261
- void THRUST_DEVICE_FUNCTION
262
- consume_tile(int num_remaining,
263
- int tile_idx,
264
- Size tile_base)
265
- {
266
- if (tile_idx == 0)
267
- {
268
- consume_tile_impl<IS_LAST_TILE, true>(num_remaining,
269
- tile_idx,
270
- tile_base);
271
- }
272
- else
273
- {
274
- consume_tile_impl<IS_LAST_TILE, false>(num_remaining,
275
- tile_idx,
276
- tile_base);
277
- }
278
- }
279
-
280
- void THRUST_DEVICE_FUNCTION
281
- consume_range(Size num_items)
282
- {
283
- int tile_idx = blockIdx.x;
284
- Size tile_base = static_cast<Size>(tile_idx) * ITEMS_PER_TILE;
285
- Size num_remaining = num_items - tile_base;
286
-
287
- if (num_remaining > ITEMS_PER_TILE) // not a last tile
288
- {
289
- consume_tile<false>(num_remaining, tile_idx, tile_base);
290
- }
291
- else if (num_remaining > 0)
292
- {
293
- consume_tile<true>(num_remaining, tile_idx, tile_base);
294
- }
295
- }
296
-
297
- //---------------------------------------------------------------------
298
- // Constructor
299
- //---------------------------------------------------------------------
300
-
301
- THRUST_DEVICE_FUNCTION
302
- impl(TempStorage &temp_storage_,
303
- InputIt input_it_,
304
- input_type * first_tile_previous_,
305
- OutputIt result_,
306
- BinaryOp binary_op_,
307
- Size num_items)
308
- : temp_storage(temp_storage_),
309
- load_it(core::make_load_iterator(ptx_plan(), input_it_)),
310
- first_tile_previous(first_tile_previous_),
311
- output_it(result_),
312
- binary_op(binary_op_)
313
- {
314
- consume_range(num_items);
315
- }
316
- }; // struct impl
317
-
318
- //---------------------------------------------------------------------
319
- // Agent entry point
320
- //---------------------------------------------------------------------
321
-
322
- THRUST_AGENT_ENTRY(InputIt first,
323
- input_type *first_element,
324
- OutputIt result,
325
- BinaryOp binary_op,
326
- Size num_items,
327
- char * shmem)
328
- {
329
- TempStorage &storage = *reinterpret_cast<TempStorage *>(shmem);
330
- impl(storage, first, first_element, result, binary_op, num_items);
331
- }
332
- }; // struct AdjacentDifferenceAgent
333
-
334
- template <class InputIt,
335
- class OutputIt,
336
- class Size>
337
- struct InitAgent
338
- {
339
- template <class Arch>
340
- struct PtxPlan : PtxPolicy<128> {};
341
- typedef core::specialize_plan<PtxPlan> ptx_plan;
342
-
343
- //---------------------------------------------------------------------
344
- // Agent entry point
345
- //---------------------------------------------------------------------
346
-
347
- THRUST_AGENT_ENTRY(InputIt first,
348
- OutputIt result,
349
- Size num_tiles,
350
- int items_per_tile,
351
- char * /*shmem*/)
352
- {
353
- int tile_idx = blockIdx.x * blockDim.x + threadIdx.x;
354
- Size tile_base = static_cast<Size>(tile_idx) * items_per_tile;
355
- if (tile_base > 0 && tile_idx < num_tiles)
356
- result[tile_idx] = first[tile_base - 1];
357
- }
358
- }; // struct InitAgent
359
-
360
- template <class InputIt,
361
- class OutputIt,
362
- class BinaryOp,
363
- class Size>
364
- cudaError_t THRUST_RUNTIME_FUNCTION
365
- doit_step(void * d_temp_storage,
366
- size_t & temp_storage_bytes,
367
- InputIt first,
368
- OutputIt result,
369
- BinaryOp binary_op,
370
- Size num_items,
371
- cudaStream_t stream,
372
- bool debug_sync)
373
- {
374
- if (num_items == 0)
375
- return cudaSuccess;
376
-
377
- using core::AgentPlan;
378
- using core::AgentLauncher;
379
-
380
- cudaError_t status = cudaSuccess;
381
-
382
- typedef AgentLauncher<
383
- AdjacentDifferenceAgent<InputIt,
384
- OutputIt,
385
- Size,
386
- BinaryOp> >
387
- difference_agent;
388
-
389
- typedef typename iterator_traits<InputIt>::value_type input_type;
390
- typedef AgentLauncher<InitAgent<InputIt, input_type *, Size> > init_agent;
391
-
392
- AgentPlan difference_plan = difference_agent::get_plan(stream);
393
- AgentPlan init_plan = init_agent::get_plan();
394
-
395
-
396
- Size tile_size = difference_plan.items_per_tile;
397
- Size num_tiles = (num_items + tile_size - 1) / tile_size;
398
-
399
- size_t tmp1 = num_tiles * sizeof(input_type);
400
- size_t vshmem_size = core::vshmem_size(difference_plan.shared_memory_size,
401
- num_tiles);
402
-
403
- size_t allocation_sizes[2] = {tmp1, vshmem_size};
404
- void * allocations[2] = {NULL, NULL};
405
-
406
- status = core::alias_storage(d_temp_storage,
407
- temp_storage_bytes,
408
- allocations,
409
- allocation_sizes);
410
- CUDA_CUB_RET_IF_FAIL(status);
411
-
412
- if (d_temp_storage == NULL)
413
- {
414
- return status;
415
- }
416
-
417
- input_type *first_tile_previous = (input_type *)allocations[0];
418
- char *vshmem_ptr = vshmem_size > 0 ? (char *)allocations[1] : NULL;
419
-
420
- init_agent ia(init_plan, num_tiles, stream, "adjacent_difference::init_agent", debug_sync);
421
- ia.launch(first, first_tile_previous, num_tiles, tile_size);
422
- CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
423
-
424
- difference_agent da(difference_plan, num_items, stream, vshmem_ptr, "adjacent_difference::difference_agent", debug_sync);
425
- da.launch(first,
426
- first_tile_previous,
427
- result,
428
- binary_op,
429
- num_items);
430
- CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
431
- return status;
432
- }
433
-
434
- template <typename Derived,
435
- typename InputIt,
436
- typename OutputIt,
437
- typename BinaryOp>
438
- OutputIt THRUST_RUNTIME_FUNCTION
439
- adjacent_difference(execution_policy<Derived>& policy,
440
- InputIt first,
441
- InputIt last,
442
- OutputIt result,
443
- BinaryOp binary_op)
444
- {
445
- typedef typename iterator_traits<InputIt>::difference_type size_type;
446
-
447
- size_type num_items = thrust::distance(first, last);
448
- size_t storage_size = 0;
449
- cudaStream_t stream = cuda_cub::stream(policy);
450
- bool debug_sync = THRUST_DEBUG_SYNC_FLAG;
451
-
452
- cudaError_t status;
453
- THRUST_INDEX_TYPE_DISPATCH(status, doit_step, num_items,
454
- (NULL, storage_size, first, result, binary_op,
455
- num_items_fixed, stream, debug_sync));
456
- cuda_cub::throw_on_error(status, "adjacent_difference failed on 1st step");
457
-
458
- // Allocate temporary storage.
459
- thrust::detail::temporary_array<thrust::detail::uint8_t, Derived>
460
- tmp(policy, storage_size);
461
- void *ptr = static_cast<void*>(tmp.data().get());
462
-
463
- THRUST_INDEX_TYPE_DISPATCH(status, doit_step, num_items,
464
- (ptr, storage_size, first, result, binary_op,
465
- num_items_fixed, stream, debug_sync));
466
- cuda_cub::throw_on_error(status, "adjacent_difference failed on 2nd step");
467
-
468
- status = cuda_cub::synchronize(policy);
469
- cuda_cub::throw_on_error(status, "adjacent_difference failed to synchronize");
470
-
471
- return result + num_items;
472
- }
473
-
474
- } // namespace __adjacent_difference
475
-
476
- //-------------------------
477
- // Thrust API entry points
478
- //-------------------------
479
-
480
- __thrust_exec_check_disable__
481
- template <class Derived,
482
- class InputIt,
483
- class OutputIt,
484
- class BinaryOp>
485
- OutputIt __host__ __device__
486
- adjacent_difference(execution_policy<Derived> &policy,
487
- InputIt first,
488
- InputIt last,
489
- OutputIt result,
490
- BinaryOp binary_op)
491
- {
492
- OutputIt ret = result;
493
- if (__THRUST_HAS_CUDART__)
494
- {
495
- ret = __adjacent_difference::adjacent_difference(policy,
496
- first,
497
- last,
498
- result,
499
- binary_op);
500
- }
501
- else
502
- {
503
- #if !__THRUST_HAS_CUDART__
504
- ret = thrust::adjacent_difference(cvt_to_seq(derived_cast(policy)),
505
- first,
506
- last,
507
- result,
508
- binary_op);
509
- #endif
510
- }
511
-
512
- return ret;
513
- }
514
-
515
- template <class Derived,
516
- class InputIt,
517
- class OutputIt>
518
- OutputIt __host__ __device__
519
- adjacent_difference(execution_policy<Derived> &policy,
520
- InputIt first,
521
- InputIt last,
522
- OutputIt result)
523
- {
524
- typedef typename iterator_traits<InputIt>::value_type input_type;
525
- return cuda_cub::adjacent_difference(policy,
526
- first,
527
- last,
528
- result,
529
- minus<input_type>());
530
- }
531
-
532
-
533
- } // namespace cuda_cub
534
- } // end namespace thrust
535
-
536
- //
537
- #include <thrust/memory.h>
538
- #include <thrust/adjacent_difference.h>
539
- #endif
540
-
 
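For orientation, the contract that the deleted Thrust CUDA backend implements: the first output element copies the first input, and every later element is `op(x[i], x[i-1])`, with subtraction as the default operator. A plain-Python sketch of that semantics:

```python
def adjacent_difference(xs, op=lambda a, b: a - b):
    """Mirror of thrust::adjacent_difference semantics on a Python list."""
    if not xs:
        return []
    out = [xs[0]]                      # first element is copied through
    out += [op(xs[i], xs[i - 1]) for i in range(1, len(xs))]
    return out


print(adjacent_difference([1, 4, 9, 16, 25]))  # [1, 3, 5, 7, 9]
```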
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/set_operations.h DELETED
@@ -1,23 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system inherits set_operations
- #include <thrust/system/cpp/detail/set_operations.h>
-
 
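The deleted header simply forwards the OpenMP backend to the sequential CPU implementation. The set operations it forwards (set_intersection and friends) work on sorted ranges with a merge-style sweep; a small Python sketch of that sweep for intersection, included here only to illustrate the idea:

```python
def set_intersection(a, b):
    """Merge-style intersection of two sorted sequences; duplicates are kept
    with the smaller of the two multiplicities, as in std::set_intersection."""
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            i += 1
        elif b[j] < a[i]:
            j += 1
        else:
            out.append(a[i])
            i += 1
            j += 1
    return out


print(set_intersection([1, 2, 2, 4, 6], [2, 2, 3, 4, 5]))  # [2, 2, 4]
```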
spaces/CVPR/WALT/mmdet/models/necks/bfp.py DELETED
@@ -1,104 +0,0 @@
- import torch.nn as nn
- import torch.nn.functional as F
- from mmcv.cnn import ConvModule, xavier_init
- from mmcv.cnn.bricks import NonLocal2d
-
- from ..builder import NECKS
-
-
- @NECKS.register_module()
- class BFP(nn.Module):
-     """BFP (Balanced Feature Pyramids)
-
-     BFP takes multi-level features as inputs and gather them into a single one,
-     then refine the gathered feature and scatter the refined results to
-     multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
-     the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
-     <https://arxiv.org/abs/1904.02701>`_ for details.
-
-     Args:
-         in_channels (int): Number of input channels (feature maps of all levels
-             should have the same channels).
-         num_levels (int): Number of input feature levels.
-         conv_cfg (dict): The config dict for convolution layers.
-         norm_cfg (dict): The config dict for normalization layers.
-         refine_level (int): Index of integration and refine level of BSF in
-             multi-level features from bottom to top.
-         refine_type (str): Type of the refine op, currently support
-             [None, 'conv', 'non_local'].
-     """
-
-     def __init__(self,
-                  in_channels,
-                  num_levels,
-                  refine_level=2,
-                  refine_type=None,
-                  conv_cfg=None,
-                  norm_cfg=None):
-         super(BFP, self).__init__()
-         assert refine_type in [None, 'conv', 'non_local']
-
-         self.in_channels = in_channels
-         self.num_levels = num_levels
-         self.conv_cfg = conv_cfg
-         self.norm_cfg = norm_cfg
-
-         self.refine_level = refine_level
-         self.refine_type = refine_type
-         assert 0 <= self.refine_level < self.num_levels
-
-         if self.refine_type == 'conv':
-             self.refine = ConvModule(
-                 self.in_channels,
-                 self.in_channels,
-                 3,
-                 padding=1,
-                 conv_cfg=self.conv_cfg,
-                 norm_cfg=self.norm_cfg)
-         elif self.refine_type == 'non_local':
-             self.refine = NonLocal2d(
-                 self.in_channels,
-                 reduction=1,
-                 use_scale=False,
-                 conv_cfg=self.conv_cfg,
-                 norm_cfg=self.norm_cfg)
-
-     def init_weights(self):
-         """Initialize the weights of FPN module."""
-         for m in self.modules():
-             if isinstance(m, nn.Conv2d):
-                 xavier_init(m, distribution='uniform')
-
-     def forward(self, inputs):
-         """Forward function."""
-         assert len(inputs) == self.num_levels
-
-         # step 1: gather multi-level features by resize and average
-         feats = []
-         gather_size = inputs[self.refine_level].size()[2:]
-         for i in range(self.num_levels):
-             if i < self.refine_level:
-                 gathered = F.adaptive_max_pool2d(
-                     inputs[i], output_size=gather_size)
-             else:
-                 gathered = F.interpolate(
-                     inputs[i], size=gather_size, mode='nearest')
-             feats.append(gathered)
-
-         bsf = sum(feats) / len(feats)
-
-         # step 2: refine gathered features
-         if self.refine_type is not None:
-             bsf = self.refine(bsf)
-
-         # step 3: scatter refined features to multi-levels by a residual path
-         outs = []
-         for i in range(self.num_levels):
-             out_size = inputs[i].size()[2:]
-             if i < self.refine_level:
-                 residual = F.interpolate(bsf, size=out_size, mode='nearest')
-             else:
-                 residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
-             outs.append(residual + inputs[i])
-
-         return tuple(outs)
 
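The deleted BFP neck gathers all pyramid levels to one resolution, averages them, optionally refines the result, and scatters residuals back to every level. A compact sketch of the gather/scatter logic with the refinement step omitted; level sizes are illustrative.

```python
import torch
import torch.nn.functional as F

# Three pyramid levels (illustrative sizes), with refine_level=1.
feats = [torch.rand(1, 256, 64, 64),
         torch.rand(1, 256, 32, 32),
         torch.rand(1, 256, 16, 16)]
refine_level = 1
gather_size = feats[refine_level].shape[2:]

# Gather: pool finer levels down, upsample coarser levels up, then average.
gathered = []
for i, f in enumerate(feats):
    if i < refine_level:
        gathered.append(F.adaptive_max_pool2d(f, output_size=gather_size))
    else:
        gathered.append(F.interpolate(f, size=gather_size, mode='nearest'))
bsf = sum(gathered) / len(gathered)

# Scatter: resize the balanced feature back to each level and add it as a residual.
outs = []
for i, f in enumerate(feats):
    size = f.shape[2:]
    if i < refine_level:
        residual = F.interpolate(bsf, size=size, mode='nearest')
    else:
        residual = F.adaptive_max_pool2d(bsf, output_size=size)
    outs.append(f + residual)

print([o.shape for o in outs])
```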
spaces/CVPR/winoground-explorer/app.py DELETED
@@ -1,31 +0,0 @@
- from datasets import load_dataset
- import gradio as gr
- import os
- import random
-
- auth_token = os.environ.get("token")
- winoground = load_dataset("facebook/winoground", use_auth_token=auth_token)["test"]
-
- def func(index):
-     example = winoground[index]
-     return example["image_0"], example["caption_0"], example["image_1"], example["caption_1"]
-
- demo = gr.Blocks()
-
- with demo:
-     gr.Markdown("# Slide across the slider to see various examples from WinoGround")
-
-     with gr.Column():
-         slider = gr.Slider(minimum=0, maximum=400)
-         with gr.Row():
-             index = random.choice(range(0, 400))
-             with gr.Column():
-                 image_input_1 = gr.Image(value=winoground[index]["image_0"])
-                 text_input_1 = gr.Textbox(value=winoground[index]["caption_0"])
-             with gr.Column():
-                 image_input_2 = gr.Image(value=winoground[index]["image_1"])
-                 text_input_2 = gr.Textbox(value=winoground[index]["caption_1"])
-
-     slider.change(func, inputs=[slider], outputs=[image_input_1, text_input_1, image_input_2, text_input_2])
-
- demo.launch()