parquet-converter commited on
Commit
907e826
·
1 Parent(s): 9311d0c

Update parquet files (step 21 of 296)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Vray Sketchup 2022 Crackeado VERIFIED.md +0 -25
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gothic 3 Forsaken Gods Enhanced Edition [key serial] - The Ultimate Guide to Activate and Play.md +0 -106
  3. spaces/1gistliPinn/ChatGPT4/Examples/CRACK PreSonus Studio One 3 Professional V6.1.0.35191-R2R FREE.md +0 -18
  4. spaces/1gistliPinn/ChatGPT4/Examples/Down Coreldraw X5 Full Crack.md +0 -80
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash Royale 3D A Fan-Made Game You Can Download and Play Now.md +0 -141
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CricHD App APK Live Cricket Streaming at Your Fingertips.md +0 -128
  7. spaces/1phancelerku/anime-remove-background/Free Download Video Game Player The Ultimate Guide to Playing Any Game on Any Device.md +0 -107
  8. spaces/1toTree/lora_test/ppdiffusers/schedulers/preconfig/preconfig_scheduling_lms_discrete.py +0 -299
  9. spaces/ASJMO/freegpt/client/css/checkbox.css +0 -55
  10. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/admin/export/+server.ts +0 -166
  11. spaces/AdamGustavsson/AnimeganV2Webcam/app.py +0 -3
  12. spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/sde_team.py +0 -72
  13. spaces/AgentVerse/agentVerse/agentverse/memory_manipulator/plan.py +0 -79
  14. spaces/AhmedM20/Email_Marketing_Content_Generator/README.md +0 -12
  15. spaces/AlexWang/lama/bin/paper_runfiles/blur_tests.sh +0 -37
  16. spaces/AlexZou/Deploy_Restoration/README.md +0 -13
  17. spaces/AlignmentResearch/tuned-lens/README.md +0 -9
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/multistep_dpm_solver_inverse.md +0 -22
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/imagic_stable_diffusion.py +0 -496
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/onnxruntime/unconditional_image_generation/README.md +0 -50
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_repaint.py +0 -344
  22. spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py +0 -86
  23. spaces/Andy1621/uniformer_image_detection/exp/cascade_mask_rcnn_3x_ms_hybrid_small/config.py +0 -142
  24. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py +0 -83
  25. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/default_runtime.py +0 -14
  26. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/iter_timer.py +0 -18
  27. spaces/Anonymous-sub/Rerender/ControlNet/gradio_annotator.py +0 -160
  28. spaces/Armandoliv/document_parser/README.md +0 -13
  29. spaces/Arnx/MusicGenXvAKN/tests/utils/__init__.py +0 -5
  30. spaces/Artrajz/vits-simple-api/vits/text/english.py +0 -188
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/subprocess.py +0 -260
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/vcs/versioncontrol.py +0 -705
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/compat.py +0 -13
  34. spaces/BAAI/vid2vid-zero/test_vid2vid_zero.py +0 -267
  35. spaces/Benson/text-generation/Examples/Descarga De La Aplicacin Tiktok Lite Para Windows Pc 8.md +0 -81
  36. spaces/Benson/text-generation/Examples/Descargar Bagaimana Cara Stumble Chicos Di Laptop.md +0 -121
  37. spaces/Benson/text-generation/Examples/Descargar Gratis Final Cut Pro.md +0 -78
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/s3/inject.py +0 -891
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/jaraco/context.py +0 -213
  40. spaces/Bishnupada/Fine-tuning-using-Hugging-face-transformers/app.py +0 -0
  41. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/proposal_generator/build.py +0 -24
  42. spaces/CVPR/LIVE/pybind11/docs/_static/theme_overrides.css +0 -11
  43. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/find.h +0 -219
  44. spaces/CVPR/WALT/configs/_base_/datasets/parking_instance_coco.py +0 -49
  45. spaces/CVPR/regionclip-demo/detectron2/layers/shape_spec.py +0 -20
  46. spaces/Cambino/dog-classifier-gradio/README.md +0 -13
  47. spaces/Caoyunkang/Segment-Any-Anomaly/SAA/prompts/ksdd2_parameters.py +0 -11
  48. spaces/Cat125/text-generator-v3/files.py +0 -7
  49. spaces/Covert1107/sd-diffusers-webui/modules/prompt_parser.py +0 -391
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_subprocesses.py +0 -79
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Vray Sketchup 2022 Crackeado VERIFIED.md DELETED
@@ -1,25 +0,0 @@
1
- <br />
2
- <h1>How to Download Vray Sketchup 2022 Crackeado for Free</h1>
3
- <p>Vray Sketchup 2022 is a powerful rendering software that can create realistic and stunning images from 3D models. It is widely used by architects, designers, and artists for various projects. However, Vray Sketchup 2022 is not a cheap software, and it requires a license to use. If you want to download Vray Sketchup 2022 crackeado for free, you might be tempted to look for online sources that offer cracked versions of the software. But is it worth it?</p>
4
- <h2>download vray sketchup 2022 crackeado</h2><br /><p><b><b>DOWNLOAD</b> &#9745; <a href="https://byltly.com/2uKxZa">https://byltly.com/2uKxZa</a></b></p><br /><br />
5
- <p>In this article, we will explain why you should avoid downloading Vray Sketchup 2022 crackeado for free, and what are the risks and consequences of doing so. We will also provide some alternatives that can help you use Vray Sketchup 2022 legally and safely.</p>
6
- <h2>Why You Should Not Download Vray Sketchup 2022 Crackeado for Free</h2>
7
- <p>Downloading Vray Sketchup 2022 crackeado for free might seem like a good idea at first, but it comes with many drawbacks and dangers. Here are some of the reasons why you should not download Vray Sketchup 2022 crackeado for free:</p>
8
- <ul>
9
- <li>It is illegal. Downloading Vray Sketchup 2022 crackeado for free is a violation of the software's terms of service and intellectual property rights. You are essentially stealing the software from the developers and distributors, who have invested time and money to create and maintain it. If you are caught downloading or using Vray Sketchup 2022 crackeado for free, you could face legal actions, fines, or even jail time.</li>
10
- <li>It is unsafe. Downloading Vray Sketchup 2022 crackeado for free from unknown or untrusted sources can expose your computer to viruses, malware, spyware, or ransomware. These malicious programs can damage your system, steal your personal information, or lock your files until you pay a ransom. You could also lose your data or compromise your privacy and security.</li>
11
- <li>It is unreliable. Downloading Vray Sketchup 2022 crackeado for free can result in a poor performance of the software. The cracked version might not work properly, crash frequently, or have missing or corrupted features. You might also experience compatibility issues with other software or hardware. You could also miss out on updates, bug fixes, or new features that the official version offers.</li>
12
- <li>It is unethical. Downloading Vray Sketchup 2022 crackeado for free is unfair to the creators and users of the software. You are depriving them of their rightful income and recognition for their work. You are also disrespecting their efforts and skills. You are also harming the software industry and the quality of the products they produce.</li>
13
- </ul>
14
- <h2>What Are Some Alternatives to Downloading Vray Sketchup 2022 Crackeado for Free</h2>
15
- <p>If you want to use Vray Sketchup 2022 legally and safely, you should avoid downloading Vray Sketchup 2022 crackeado for free. Instead, you should consider some of these alternatives:</p>
16
- <ul>
17
- <li>Buy a license. The best way to use Vray Sketchup 2022 is to purchase a license from the official website or an authorized reseller. This way, you can enjoy all the benefits and features of the software without any risks or limitations. You can also get technical support and customer service if you encounter any problems.</li>
18
- <li>Use a trial version. If you want to try Vray Sketchup 2022 before buying it, you can download a trial version from the official website. The trial version allows you to use the software for 30 days with full functionality. You can then decide whether you want to buy a license or not.</li>
19
- <li>Use an alternative software. If you cannot afford or do not want to buy a license for Vray Sketchup 2022, you can look for other rendering software that are similar or compatible with Sketchup. Some examples are Blender, Lumion, Enscape, or Twinmotion. These software might have different features or prices than Vray Sketchup 2022, but they can still help you create realistic and stunning images from your 3D models.</li>
20
- </ul>
21
- <h2>Conclusion</h2>
22
- <p>Vray Sketchup 2022 is a great rendering software that can help you create amazing images from your 3D models. However, downloading Vray Sketchup 2022 crackeado for</p>
23
- <p></p> ddb901b051<br />
24
- <br />
25
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gothic 3 Forsaken Gods Enhanced Edition [key serial] - The Ultimate Guide to Activate and Play.md DELETED
@@ -1,106 +0,0 @@
1
- <br />
2
- <h1>Gothic 3: Forsaken Gods Enhanced Edition [key serial]</h1>
3
- <p>If you are a fan of open-world action role-playing games, you might have heard of Gothic 3, a game that was released in 2006 by JoWooD Productions. The game received mixed reviews from critics and players, mainly due to its technical issues and bugs. However, it also had a loyal fan base that appreciated its immersive world, rich lore and freedom of choice.</p>
4
- <h2>Gothic 3: Forsaken Gods Enhanced Edition [key serial]</h2><br /><p><b><b>Download File</b> &#9913; <a href="https://byltly.com/2uKxkz">https://byltly.com/2uKxkz</a></b></p><br /><br />
5
- <p>In 2008, JoWooD Productions released a standalone expansion for Gothic 3, called Gothic 3: Forsaken Gods. The expansion was developed by Trine Games and aimed to answer some of the questions that were left unresolved in the original game. However, the expansion also suffered from many problems, such as poor graphics, gameplay glitches and a lackluster story.</p>
6
- <p>In 2011, JoWooD Productions released a new version of the expansion, called Gothic 3: Forsaken Gods Enhanced Edition. The new version was developed by Mad Vulture Games, a team that also worked on community patches for Gothic 3. The enhanced edition improved many aspects of the game, such as graphics, sound, combat system, quests, monsters and characters.</p>
7
- <p>If you are interested in playing Gothic 3: Forsaken Gods Enhanced Edition, you will need a key serial to activate the game on Steam. A key serial is a unique code that verifies your ownership of the game and allows you to download and play it online. You can get a key serial for Gothic 3: Forsaken Gods Enhanced Edition by buying it from Steam or other authorized retailers.</p>
8
- <h2>Gameplay</h2>
9
- <p>Gothic 3: Forsaken Gods Enhanced Edition is an open-world action role-playing game that takes place in Myrtana, a fantasy continent that is divided by various factions. You play as the Nameless Hero, a legendary warrior who has banished the influence of the gods from Myrtana with the help of his friend Xardas, a powerful mage.</p>
10
- <p>However, your actions have caused more chaos and conflict in Myrtana, as different groups fight for power and resources. You wake up from a coma in a secret realm between space and time, where you witness the events that unfold in Myrtana. You decide to return to Myrtana and try to unite it once again under a new empire for both humans and orcs.</p>
11
- <p>The gameplay of Gothic 3: Forsaken Gods Enhanced Edition is similar to that of Gothic 3, but with some differences. The combat system has been revised and now relies on endurance. You can use various weapons, armors, spells and skills to fight your enemies. You can also interact with many characters, join factions, complete quests and explore the world.</p>
12
- <p>The enhanced edition adds new quests, monsters and characters to the game. Some of these are old friends or foes from previous Gothic games, such as Diego, Gorn, Milten and Lee. You can also encounter new creatures, such as giant spiders, undead warriors and fire golems. The enhanced edition also fixes some bugs and glitches that were present in the original expansion.</p>
13
- <h2>Graphics and Sound</h2>
14
- <p>Gothic 3: Forsaken Gods Enhanced Edition improves the graphics quality of the game compared to the original expansion. The enhanced edition uses an updated engine that enhances the lighting, shadows, textures and animations of the game. The enhanced edition also adds new visual effects, such as blood splatter, fire sparks and smoke trails.</p>
15
- <p>The sound quality of Gothic 3: Forsaken Gods Enhanced Edition is also improved compared to the original expansion. The enhanced edition features better voice acting, sound effects and music for the game. The enhanced edition also adds new soundtracks composed by Kai Rosenkranz, who also worked on previous Gothic games.</p>
16
- <p>Gothic 3 Forsaken Gods Enhanced Edition Steam key<br />
17
- How to activate Gothic 3 Forsaken Gods Enhanced Edition key<br />
18
- Gothic 3 Forsaken Gods Enhanced Edition CD key generator<br />
19
- Gothic 3 Forsaken Gods Enhanced Edition key serial free download<br />
20
- Gothic 3 Forsaken Gods Enhanced Edition license key crack<br />
21
- Gothic 3 Forsaken Gods Enhanced Edition product key online<br />
22
- Gothic 3 Forsaken Gods Enhanced Edition activation key code<br />
23
- Gothic 3 Forsaken Gods Enhanced Edition serial number giveaway<br />
24
- Gothic 3 Forsaken Gods Enhanced Edition registration key cheap<br />
25
- Gothic 3 Forsaken Gods Enhanced Edition game key purchase<br />
26
- Gothic 3 Forsaken Gods Enhanced Edition steam code redeem<br />
27
- Gothic 3 Forsaken Gods Enhanced Edition origin key buy<br />
28
- Gothic 3 Forsaken Gods Enhanced Edition gog key sale<br />
29
- Gothic 3 Forsaken Gods Enhanced Edition epic games key discount<br />
30
- Gothic 3 Forsaken Gods Enhanced Edition uplay key offer<br />
31
- Gothic 3 Forsaken Gods Enhanced Edition xbox one key deal<br />
32
- Gothic 3 Forsaken Gods Enhanced Edition ps4 key coupon<br />
33
- Gothic 3 Forsaken Gods Enhanced Edition switch key price<br />
34
- Gothic 3 Forsaken Gods Enhanced Edition pc key comparison<br />
35
- Gothic 3 Forsaken Gods Enhanced Edition mac key review<br />
36
- Gothic 3 Forsaken Gods Enhanced Edition linux key rating<br />
37
- Gothic 3 Forsaken Gods Enhanced Edition windows key delivery<br />
38
- Gothic 3 Forsaken Gods Enhanced Edition digital key instant<br />
39
- Gothic 3 Forsaken Gods Enhanced Edition physical key shipping<br />
40
- Gothic 3 Forsaken Gods Enhanced Edition collector's edition key bonus<br />
41
- Gothic 3 Forsaken Gods Enhanced Edition deluxe edition key features<br />
42
- Gothic 3 Forsaken Gods Enhanced Edition gold edition key extras<br />
43
- Gothic 3 Forsaken Gods Enhanced Edition ultimate edition key benefits<br />
44
- Gothic 3 Forsaken Gods Enhanced Edition complete edition key content<br />
45
- Gothic 3 Forsaken Gods Enhanced Edition definitive edition key difference<br />
46
- Gothic 3 Forsaken Gods Enhanced Edition remastered edition key upgrade<br />
47
- Gothic 3 Forsaken Gods Enhanced Edition enhanced edition key serial vs original<br />
48
- Gothic 3 Forsaken Gods Enhanced Edition expansion pack key add-on<br />
49
- Gothic 3 Forsaken Gods Enhanced Edition DLC pack key download<br />
50
- Gothic 3 Forsaken Gods Enhanced Edition update patch key install<br />
51
- Gothic 3 Forsaken Gods Enhanced Edition mod pack key enable<br />
52
- Gothic 3 Forsaken Gods Enhanced Edition cheat code key unlock<br />
53
- Gothic 3 Forsaken Gods Enhanced Edition trainer tool key use<br />
54
- Gothic 3 Forsaken Gods Enhanced Edition guide book key access<br />
55
- Gothic 3 Forsaken Gods Enhanced Edition walkthrough video key watch<br />
56
- Gothic 3 Forsaken Gods Enhanced Edition gameplay stream key view<br />
57
- Gothic 3 Forsaken Gods Enhanced Edition soundtrack music key listen<br />
58
- Gothic 3 Forsaken Gods Enhanced Edition art book pdf key read<br />
59
- Gothic 3 Forsaken Gods Enhanced Edition wallpaper image key download<br />
60
- Gothic 3 Forsaken Gods Enhanced Edition poster print key order<br />
61
- Gothic 3 Forsaken Gods Enhanced Edition t-shirt design key buy<br />
62
- Gothic 3 Forsaken Gods Enhanced Edition mug gift key get<br />
63
- Gothic 3 Forsaken Gods Enhanced Edition sticker pack key collect<br />
64
- Gothic 3 Forsaken Gods Enhanced Edition fan art gallery key admire<br />
65
- Gothic 3 Forsaken Gods Enhanced Edition review blog post key comment</p>
66
- <p>The technical requirements and performance of Gothic 3: Forsaken Gods Enhanced Edition are similar to those of Gothic 3. You will need a Windows XP/Vista/7 operating system, an Intel or AMD single-core processor (2.5 GHz), 1 GB of RAM (1.5 GB with Windows Vista/7), 4 GB of free disk space, an ATI X1900 or nVidia 7900 video card with 256 MB of RAM and DirectX 9.0c or higher.</p>
67
- <p>You can customize the graphics and sound settings of Gothic 3: Forsaken Gods Enhanced Edition according to your preferences and system specifications. You can adjust options such as resolution, anti-aliasing, texture quality, shadow quality, ambient occlusion, sound volume and subtitles.</p>
68
- <h2>Conclusion</h2>
69
- <p>Gothic 3: Forsaken Gods Enhanced Edition is a game that offers an immersive open-world experience with plenty of freedom and choice. If you enjoyed playing Gothic 3 or other games in the series, you will find this game appealing. The enhanced edition improves many aspects of the original expansion, such as graphics, sound, combat system, quests, monsters and characters.</p>
70
- <p>However, Gothic 3: Forsaken Gods Enhanced Edition is not a perfect game. It still has some flaws, such as a weak story, repetitive gameplay, clunky controls, and some remaining bugs and issues. The enhanced edition also does not add much new content, so you might find it short and lacking. The gameplay area is limited to one part of Myrtana, unlike in Gothic 3 which had three parts.</p>
71
- <p>If you want to play Gothic 3: Forsaken Gods Enhanced Edition, you can buy or download it from Steam or other authorized retailers. The game costs $9.99 on Steam, but you can also find it on sale or in bundles with other games. You will need a key serial to activate the game on Steam, which you will receive after your purchase. You can also check out other games in the Gothic series, such as Gothic 1, Gothic II: Gold Edition, Gothic® 3, ArcaniA, and ArcaniA: Fall of Setarrif.</p>
72
- <h2>FAQs</h2>
73
- <ul>
74
- <li>Is Gothic 3: Forsaken Gods Enhanced Edition a standalone game or an expansion?</li>
75
- <p>Gothic 3: Forsaken Gods Enhanced Edition is a standalone game, which means you do not need to have Gothic 3 installed to play it. However, it is also an expansion for Gothic 3, which means it continues the story and gameplay of Gothic 3.</p>
76
- <li>How long is Gothic 3: Forsaken Gods Enhanced Edition?</li>
77
- <p>Gothic 3: Forsaken Gods Enhanced Edition is a relatively short game compared to other open-world RPGs. It can take you around 10-15 hours to complete all the main quests and side quests in the game. However, you can spend more time exploring, fighting, looting, and crafting in the game if you want.</p>
78
- <li>Is Gothic 3: Forsaken Gods Enhanced Edition compatible with Gothic 3 mods and patches?</li>
79
- <p>Gothic 3: Forsaken Gods Enhanced Edition is not compatible with most mods and patches made for Gothic 3. have different files and scripts. However, you can use some mods and patches that are specifically made for Gothic 3: Forsaken Gods Enhanced Edition. You can find them on websites such as World of Gothic or Nexus Mods.</p>
80
- <li>How to fix common bugs and issues in Gothic 3: Forsaken Gods Enhanced Edition?</li>
81
- <p>Gothic 3: Forsaken Gods Enhanced Edition is a more stable and polished game than the original expansion, but it still has some bugs and issues that can affect your gameplay. Some of the common problems are crashes, freezes, stuttering, low FPS, graphical glitches, sound errors, and save game corruption. To fix these problems, you can try the following solutions:</p>
82
- <ul>
83
- <li>Update your drivers and DirectX to the latest versions.</li>
84
- <li>Run the game as an administrator and in compatibility mode for Windows XP SP3.</li>
85
- <li>Disable any antivirus or firewall programs that might interfere with the game.</li>
86
- <li>Lower your graphics and sound settings to reduce the load on your system.</li>
87
- <li>Verify the integrity of your game files on Steam or reinstall the game if necessary.</li>
88
- <li>Apply the latest official patch or community patch for the game.</li>
89
- <li>Use a save game cleaner or editor to fix corrupted save files.</li>
90
- <li>Check online forums or guides for specific solutions to specific problems.</li>
91
- </ul>
92
- <li>What are some tips and tricks for Gothic 3: Forsaken Gods Enhanced Edition?</li>
93
- <p>Gothic 3: Forsaken Gods Enhanced Edition is a challenging game that requires you to use your skills, strategy, and resources wisely. Here are some tips and tricks that can help you survive and thrive in the game:</p>
94
- <ul>
95
- <li>Save your game often and in different slots, especially before entering a new area or starting a new quest.</li>
96
- <li>Explore every corner of the world and loot everything you can find. You never know what useful items or secrets you might discover.</li>
97
- <li>Talk to every character you meet and listen to their stories and requests. You might gain valuable information, allies, or rewards.</li>
98
- <li>Choose your faction carefully and be aware of the consequences of your actions. Your reputation and alignment will affect how other factions and characters treat you.</li>
99
- <li>Learn new skills and spells from trainers and books. They will enhance your abilities and give you an edge in combat.</li>
100
- <li>Craft your own weapons, armors, potions, and scrolls using the materials you find or buy. They will be more powerful and customized than the ones you find or loot.</li>
101
- <li>Use stealth, ranged attacks, magic, or melee combat depending on the situation and your preference. You can also combine them for more effectiveness.</li>
102
- <li>Avoid fighting multiple enemies at once or enemies that are too strong for you. You can use distractions, traps, environmental hazards, or allies to help you.</li>
103
- </ul>
104
- </p> 0a6ba089eb<br />
105
- <br />
106
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/CRACK PreSonus Studio One 3 Professional V6.1.0.35191-R2R FREE.md DELETED
@@ -1,18 +0,0 @@
1
- <h2>CRACK PreSonus Studio One 3 Professional v6.1.0.35191-R2R</h2><br /><p><b><b>Download File</b> ->>> <a href="https://imgfil.com/2uy0Xd">https://imgfil.com/2uy0Xd</a></b></p><br /><br />
2
- <br />
3
- PART1 - Instructions - What's New and Important - Support and Troubleshooting |. How to install and use: 1..4,6-Dihydropyridine derivatives of the formula (I) have shown antithrombotic activity and are useful as a medicinal product for the prophylaxis and treatment of thrombosis in humans and animals.
4
-
5
- 1. Therapeutic Uses
6
-
7
- 2. Pharmaceutical Compositions
8
-
9
- 3. Pharmaceutical Packages
10
-
11
- 4. Methods of Preparing the Compositions
12
-
13
- 5. Applications of the Compositions
14
-
15
- P2Y.sub.12 antagonists are useful for treating mammals (e.g., humans) having or being susceptible to having one or more diseases or conditions (e.g., thrombotic disorders), wherein said disease or condition is associated with, or caused by or caused by, the inappropriate activation of platelets (e.g., by the inappropriate aggregation of platelets) or leukocytes (e.g., by the inappropriate activation of leukocytes). In one embodiment, the disease or condition is associated with, or caused by, the inappropriate activation of platelets and/or leukocytes. These include, but are not limited to, atherothrombotic events, such as myocardial infarction, stroke, unstable angina pectoris, intermittent claudication, arteriosclerosis, restenosis, reocclusion or complications associated with balloon angioplasty, coronary bypass surgery, and the like. In another embodiment, the disease or condition is associated with, or caused by, the inappropriate activation of platelets. These include, but are not limited to, acute coronary syndromes such as unstable angina, ST-segment elevation myocardial infarction, non-ST-segment elevation myocardial infarction and other coronary reperfusion syndromes, thrombolytic therapy, coronary artery bypass graft (CABG) surgery, carotid angioplasty, primary coronary angioplasty, cardiac bypass, angioplasty or stent placement after acute coronary syndromes and the like. In a further embodiment, the disease or condition is associated with, or caused by, the inappropriate activation of leukocytes. These include, but are not limited to, the inappropriate activation of monocytes/macrophages or neutrophils, which is associated with, or causes, atherosclerosis, resten 4fefd39f24<br />
16
- <br />
17
- <br />
18
- <p></p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Down Coreldraw X5 Full Crack.md DELETED
@@ -1,80 +0,0 @@
1
-
2
- <h1>Down Coreldraw X5 Full Crack: A Guide to Download and Install the Graphics Suite</h1>
3
-
4
- <p>If you are looking for a powerful and versatile graphics design software, you might want to consider <strong>Down Coreldraw X5 Full Crack</strong>. This is a cracked version of CorelDraw Graphics Suite X5, which is one of the most popular and widely used vector graphics applications in the world. With this software, you can create stunning logos, illustrations, banners, flyers, web graphics, and more with ease and efficiency.</p>
5
- <h2>Down Coreldraw X5 Full Crack</h2><br /><p><b><b>Download Zip</b> &#9989; <a href="https://imgfil.com/2uxZvO">https://imgfil.com/2uxZvO</a></b></p><br /><br />
6
-
7
- <p>However, downloading and installing <strong>Down Coreldraw X5 Full Crack</strong> is not as simple as clicking a button. You need to follow some steps and precautions to make sure that you get the full version of the software without any errors or viruses. In this article, we will show you how to do that in a safe and reliable way.</p>
8
-
9
- <h2>What is Down Coreldraw X5 Full Crack?</h2>
10
-
11
- <p><strong>Down Coreldraw X5 Full Crack</strong> is a modified version of CorelDraw Graphics Suite X5 that bypasses the activation process and allows you to use the software for free. Normally, you would need to purchase a license key or subscribe to a monthly plan to use CorelDraw X5 legally. However, with <strong>Down Coreldraw X5 Full Crack</strong>, you can get access to all the features and tools of the software without paying anything.</p>
12
-
13
- <p>This might sound tempting, but you should also be aware of the risks and disadvantages of using <strong>Down Coreldraw X5 Full Crack</strong>. First of all, it is illegal and unethical to use cracked software, as it violates the intellectual property rights of the developers. You could face legal consequences or fines if you are caught using <strong>Down Coreldraw X5 Full Crack</strong>. Secondly, cracked software often comes with malware or viruses that can harm your computer or steal your personal information. You could lose your data or compromise your security if you download <strong>Down Coreldraw X5 Full Crack</strong> from untrusted sources. Thirdly, cracked software usually does not receive updates or support from the developers. You could miss out on new features, bug fixes, or compatibility improvements if you use <strong>Down Coreldraw X5 Full Crack</strong>.</p>
14
-
15
- <p>Therefore, we do not recommend using <strong>Down Coreldraw X5 Full Crack</strong>, and we advise you to purchase a legitimate copy of CorelDraw X5 from the official website. However, if you still want to try <strong>Down Coreldraw X5 Full Crack</strong>, we will show you how to download and install it in the next section.</p>
16
-
17
- <h2>How to Download and Install Down Coreldraw X5 Full Crack?</h2>
18
-
19
- <p>To download and install <strong>Down Coreldraw X5 Full Crack</strong>, you will need two files: the setup file and the keygen file. The setup file is the installer of CorelDraw X5 that contains all the necessary files for the software. The keygen file is a program that generates serial numbers and activation codes for CorelDraw X5. You will need both files to activate <strong>Down Coreldraw X5 Full Crack</strong>.</p>
20
- <p></p>
21
-
22
- <p>You can find both files on various websites that offer cracked software downloads. However, you should be careful when choosing a website, as some of them might contain fake or malicious links that can infect your computer with malware or viruses. To avoid this, you should look for websites that have positive reviews, ratings, or feedback from other users. You should also scan the files with an antivirus program before opening them.</p>
23
-
24
- <p>Once you have downloaded both files, you can follow these steps to install <strong>Down Coreldraw X5 Full Crack</strong>:</p>
25
-
26
- <ol>
27
- <li>Turn off your internet connection and antivirus program.</li>
28
- <li>Extract the setup file with WinRAR or any other file compression tool.</li>
29
- <li>Run the setup.exe file and select "I don't have a serial number" > Next.</li>
30
- <li>Select Typical Installation and start installing.</li>
31
- <li>Run the CGSX5HF4.exe file for updates.</li>
32
- <li>When finished, open the CorelDRW.exe file in C:\\Program Files (x86)\\Corel\\CorelDRAW Graphics Suite x5\\Programs.</li>
33
- <li>Select Register Later > Once open, just exit the program immediately.</li>
34
- <li>The Activation dialog will then appear.</li>
35
- <li>Click "Already Purchased?" > Copy the Serial Number that appears in the dialog.</li>
36
- <li>Run keygen.exe as administrator, Paste the Serial Number earlier.</li>
37
- <li>You copy the Installation Code that appears to Keygen.</li>
38
- <li>Now click Activation, copy Activation Code.</li>
39
- <li>Paste and click the Continue button.</li>
40
- <li>Congratulations! You have successfully installed <strong>Down Coreldraw X5 Full Crack</strong>.</li>
41
- </ol>
42
-
43
- <h2>Conclusion</h2>
44
-
45
- <p><strong>Down Coreldraw X5 Full Crack</strong> is a cracked version of CorelDraw Graphics Suite X5 that allows you to use the software for free without activation. However, it also comes with many risks and disadvantages, such as legal issues, malware infections, or lack of updates. Therefore, we do not recommend using <strong>Down Coreldraw X5 Full Crack</strong>, and we suggest you buy a legitimate copy of CorelDraw X5 from the official website instead.</p>
46
-
47
- <p>If you still want to try <strong>Down Coreldraw X5 Full Crack</strong>, we have shown you how to download and install it in this article. However, you should do this at your own risk and responsibility. We hope this article was helpful for you. Thank you for reading!</p>
48
- <h2>What are the Benefits of Down Coreldraw X5 Full Crack?</h2>
49
-
50
- <p>Despite the risks and disadvantages of using <strong>Down Coreldraw X5 Full Crack</strong>, some users might still find some benefits from using it. Here are some of the possible benefits of using <strong>Down Coreldraw X5 Full Crack</strong>:</p>
51
-
52
- <ul>
53
- <li>You can save money by not paying for a license key or a subscription plan.</li>
54
- <li>You can access all the features and tools of CorelDraw X5 without any limitations or restrictions.</li>
55
- <li>You can create professional and stunning graphics for various purposes and industries.</li>
56
- <li>You can enjoy the new and enhanced features of CorelDraw X5, such as the B-Spline tool, the Mesh Fill tool, the Web Graphics tools, and more.</li>
57
- <li>You can work with vector graphics easily and efficiently with CorelDraw X5's minimalist and user-friendly interface.</li>
58
- </ul>
59
-
60
- <p>These are some of the potential benefits of using <strong>Down Coreldraw X5 Full Crack</strong>. However, you should also weigh them against the risks and disadvantages that we mentioned earlier. You might find that the benefits are not worth the costs and consequences of using <strong>Down Coreldraw X5 Full Crack</strong>.</p>
61
-
62
- <h2>What are the Alternatives to Down Coreldraw X5 Full Crack?</h2>
63
-
64
- <p>If you are looking for a graphics design software that is similar to CorelDraw X5 but does not require cracking or activation, you might want to consider some of the alternatives that are available in the market. Here are some of the possible alternatives to <strong>Down Coreldraw X5 Full Crack</strong>:</p>
65
-
66
- <ul>
67
- <li><a href="https://www.adobe.com/products/illustrator.html">Adobe Illustrator</a>: This is one of the most popular and widely used vector graphics software in the world. It offers a comprehensive set of tools and features for creating logos, icons, illustrations, typography, and more. It also integrates well with other Adobe products, such as Photoshop, InDesign, and After Effects. However, Adobe Illustrator is not free, and you need to pay a monthly or annual subscription fee to use it.</li>
68
- <li><a href="https://inkscape.org/">Inkscape</a>: This is a free and open-source vector graphics software that can run on Windows, Mac, and Linux. It has a similar interface and functionality to CorelDraw X5, and it supports many file formats, such as SVG, PNG, PDF, EPS, and more. It also has a large community of users and developers who provide support and resources for Inkscape users. However, Inkscape might not have all the advanced features or performance that CorelDraw X5 has.</li>
69
- <li><a href="https://affinity.serif.com/en-gb/designer/">Affinity Designer</a>: This is a relatively new but powerful vector graphics software that can compete with CorelDraw X5 and Adobe Illustrator. It has a sleek and modern interface that is easy to use and customize. It also has a fast and smooth performance that can handle complex graphics and large files. It also supports many file formats, such as PSD, AI, PDF, SVG, EPS, and more. Affinity Designer is not free, but it has a one-time payment option that is cheaper than Adobe Illustrator's subscription plan.</li>
70
- </ul>
71
-
72
- <p>These are some of the possible alternatives to <strong>Down Coreldraw X5 Full Crack</strong>. You might want to try them out and see which one suits your needs and preferences better. You might find that they are more reliable, secure, and updated than <strong>Down Coreldraw X5 Full Crack</strong>.</p>
73
-
74
- <h2>Conclusion</h2>
75
-
76
- <p><strong>Down Coreldraw X5 Full Crack</strong> is a cracked version of CorelDraw Graphics Suite X5 that allows you to use the software for free without activation. However, it also comes with many risks and disadvantages, such as legal issues, malware infections, or lack of updates. Therefore, we do not recommend using <strong>Down Coreldraw X5 Full Crack</strong>, and we suggest you buy a legitimate copy of CorelDraw X5 from the official website instead.</p>
77
-
78
- <p>If you still want to try <strong>Down Coreldraw X5 Full Crack</strong>, we have shown you how to download and install it in this article. However, you should do this at your own risk and responsibility. We hope this article was helpful for you. Thank you for reading!</p> 3cee63e6c2<br />
79
- <br />
80
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash Royale 3D A Fan-Made Game You Can Download and Play Now.md DELETED
@@ -1,141 +0,0 @@
1
-
2
- <h1>Clash Royale 3D Download: How to Play Your Favorite Game in a New Dimension</h1>
3
- <p>If you are a fan of Clash Royale, the popular mobile game that combines card collecting, tower defense, and real-time strategy, you might be wondering if there is a way to play it in a more immersive and realistic way. Well, you are in luck, because there is a fan-made project that allows you to play Clash Royale in 3D! In this article, we will tell you everything you need to know about Clash Royale 3D, how to download and install it, and how to enjoy it to the fullest. So, get ready to experience your favorite game in a new dimension!</p>
4
- <h2>clash royale 3d download</h2><br /><p><b><b>Download File</b> >>>>> <a href="https://urlin.us/2uSShM">https://urlin.us/2uSShM</a></b></p><br /><br />
5
- <h2>What is Clash Royale 3D?</h2>
6
- <p>Clash Royale 3D is a fan-made project that brings Clash Royale to life in 3D. It is not an official game by Supercell, the developer of Clash Royale, but rather a tribute by some passionate fans who wanted to create something unique and amazing. Clash Royale 3D is not a simple port or remake of the original game, but rather a new way to play it with enhanced graphics, animations, sounds, and gameplay.</p>
7
- <h3>A fan-made project that brings Clash Royale to life in 3D</h3>
8
- <p>Clash Royale 3D is created by using Sketchfab, a platform that allows anyone to create, share, and discover 3D models online. Sketchfab has a large community of artists and enthusiasts who create and upload various 3D models, including those based on popular games, movies, characters, and more. Some of these models are inspired by Clash Royale, such as the characters, cards, towers, arenas, and effects. By using Sketchfab's viewer and API, the developers of Clash Royale 3D were able to combine these models into a playable game that runs on your browser.</p>
9
- <h3>The features and benefits of playing Clash Royale 3D</h3>
10
- <p>Clash Royale 3D has many features and benefits that make it worth playing. Here are some of them:</p>
11
- <ul>
12
- <li>You can play Clash Royale in a more realistic and immersive way. You can see your cards come to life in 3D, watch them move and attack on the battlefield, and hear them make sounds and voices. You can also zoom in and out, rotate the camera, and change the perspective to get a better view of the action.</li>
13
- <li>You can explore different arenas and environments in 3D. You can see the details and textures of each arena, such as the grass, rocks, trees, buildings, flags, and more. You can also see the weather effects, such as rain, snow, fog, and night. You can even interact with some elements of the arena, such as breaking barrels or opening chests.</li>
14
- <li>You can enjoy the same gameplay and mechanics as the original game. You can still collect cards, build decks, join clans, chat with other players, participate in events, earn rewards, and more. You can also play against other players online or against bots offline.</li>
15
- <li>You can customize your game settings and preferences. You can adjust the quality, resolution, sound, and performance of the game. You can also choose the language, theme, and mode of the game. You can even enable or disable some features, such as shadows, reflections, particles, and animations.</li>
16
- </ul>
17
- <h2>How to download and install Clash Royale 3D?</h2>
18
- <p>Clash Royale 3D is not available on the official app stores, such as Google Play or Apple Store, because it is not an official game by Supercell. However, you can still download and install it easily by following these steps:</p>
19
- <p>clash royale 3d models free download<br />
20
- clash royale 3d apk download for android<br />
21
- clash royale 3d arena 7 royal arena<br />
22
- clash royale 3d mod apk download latest version<br />
23
- clash royale 3d game download for pc<br />
24
- clash royale 3d characters download<br />
25
- clash royale 3d animation download<br />
26
- clash royale 3d wallpaper download<br />
27
- clash royale 3d offline download<br />
28
- clash royale 3d hack download<br />
29
- clash royale 3d skins download<br />
30
- clash royale 3d online download<br />
31
- clash royale 3d cards download<br />
32
- clash royale 3d sound effects download<br />
33
- clash royale 3d logo download<br />
34
- clash royale 3d fan made download<br />
35
- clash royale 3d update download<br />
36
- clash royale 3d ios download<br />
37
- clash royale 3d tower defense download<br />
38
- clash royale 3d simulator download<br />
39
- clash royale 3d chest opening download<br />
40
- clash royale 3d private server download<br />
41
- clash royale 3d beta download<br />
42
- clash royale 3d minecraft map download<br />
43
- clash royale 3d battle pass download<br />
44
- clash royale 3d clan wars download<br />
45
- clash royale 3d tutorial download<br />
46
- clash royale 3d editor download<br />
47
- clash royale 3d gameplay video download<br />
48
- clash royale 3d theme song download<br />
49
- clash royale 3d stickers download<br />
50
- clash royale 3d memes download<br />
51
- clash royale 3d tips and tricks download<br />
52
- clash royale 3d reddit download<br />
53
- clash royale 3d discord server download<br />
54
- clash royale 3d tournament mode download<br />
55
- clash royale 3d brawlers download<br />
56
- clash royale 3d heroes download<br />
57
- clash royale 3d spells download<br />
58
- clash royale 3d buildings download<br />
59
- clash royale 3d troops download<br />
60
- clash royale 3d emotes download<br />
61
- clash royale 3d star levels download<br />
62
- clash royale 3d quests download<br />
63
- clash royale 3d shop offers download<br />
64
- clash royale 3d replays download<br />
65
- clash royale 3d stats tracker download<br />
66
- clash royale 3d deck builder download</p>
67
- <h3>The requirements and steps for downloading Clash Royale 3D</h3>
68
- <p>Before you download Clash Royale 3D, you need to make sure that your device meets the minimum requirements for running the game. These are:</p>
69
- <ul>
70
- <li>A device that supports WebGL, which is a technology that enables 3D graphics on the web. Most modern browsers and devices support WebGL, but you can check if yours does by visiting this link: [WebGL Report].</li>
71
- <li>A stable internet connection, preferably with a high speed and low latency. This is because Clash Royale 3D is an online game that requires constant communication with the server and other players.</li>
72
- <li>A sufficient amount of storage space on your device, depending on the size of the game files. The current version of Clash Royale 3D is about 300 MB, but it may vary depending on the updates and patches.</li>
73
- </ul>
74
- <p>Once you have verified that your device meets the requirements, you can proceed to download Clash Royale 3D by following these steps:</p>
75
- <ol>
76
- <li>Visit the official website of Clash Royale 3D at [clashroyale3d.com].</li>
77
- <li>Click on the "Download" button and choose the version that matches your device (Android or iOS).</li>
78
- <li>Wait for the download to finish and then open the downloaded file.</li>
79
- <li>Follow the instructions on the screen to install Clash Royale 3D on your device.</li>
80
- <li>Launch Clash Royale 3D and enjoy!</li>
81
- </ol>
82
- <h3>The tips and tricks for playing Clash Royale 3D smoothly</h3>
83
- <p>Clash Royale 3D is a fun and exciting game, but it can also be challenging and frustrating at times. To help you play Clash Royale 3D smoothly and avoid any problems or issues, here are some tips and tricks that you should keep in mind:</p>
84
- <ul>
85
- <li>Make sure that your device is fully charged or plugged in before playing Clash Royale 3D. The game can drain your battery quickly because of its high graphics and performance demands.</li>
86
- <li>Close any other apps or programs that are running in the background while playing Clash Royale 3D. This can free up some memory and CPU resources for the game and prevent any lag or crashes.</li>
87
- <li>Adjust your game settings according to your device's capabilities and preferences. You can lower the quality, resolution, sound, and performance of the game if you experience any slowness or stuttering. You can also enable or disable some features, such as shadows, reflections, particles, and animations, to improve the game's performance.</li>
88
- <li>Use a reliable and secure network connection when playing Clash Royale 3D online. Avoid using public or unsecured Wi-Fi networks that may expose your data or interfere with your gameplay. You can also use a VPN service to protect your privacy and bypass any geo-restrictions or firewalls.</li>
89
- <li>Update your game regularly to get the latest features, fixes, and improvements. You can check for updates by visiting the official website of Clash Royale 3D or by opening the game's settings menu.</li>
90
- </ul> <h2>How to enjoy Clash Royale 3D to the fullest?</h2>
91
- <p>Clash Royale 3D is not only a game, but also a way to express your creativity and passion for Clash Royale. There are many ways to enjoy Clash Royale 3D to the fullest, such as:</p>
92
- <h3>The best 3D models and arenas to explore in Clash Royale 3D</h3>
93
- <p>One of the main attractions of Clash Royale 3D is the variety and quality of the 3D models and arenas that you can explore. You can see your favorite characters, cards, towers, and effects in a new light, with more details, colors, and animations. You can also discover new and unique models and arenas that are not available in the original game, such as custom-made ones by other fans or artists. Here are some of the best 3D models and arenas that you can explore in Clash Royale 3D:</p>
94
- <table>
95
- <tr>
96
- <th>Model/Arena</th>
97
- <th>Description</th>
98
- </tr>
99
- <tr>
100
- <td>King Tower</td>
101
- <td>The King Tower is the main tower that you have to protect in Clash Royale. In Clash Royale 3D, you can see the King Tower in 3D, with its crown, cannons, flags, and windows. You can also see the King himself, sitting on his throne and cheering or taunting you.</td>
102
- </tr>
103
- <tr>
104
- <td>Princess</td>
105
- <td>The Princess is one of the most popular and iconic cards in Clash Royale. She is a legendary card that can shoot arrows from a long distance. In Clash Royale 3D, you can see the Princess in 3D, with her dress, hair, bow, and quiver. You can also see her facial expressions and hear her voice.</td>
106
- </tr>
107
- <tr>
108
- <td>Fireball</td>
109
- <td>The Fireball is one of the most powerful and versatile spells in Clash Royale. It is a rare card that can deal high damage to a large area. In Clash Royale 3D, you can see the Fireball in 3D, with its flames, sparks, and smoke. You can also feel its impact and hear its sound.</td>
110
- </tr>
111
- <tr>
112
- <td>Hog Mountain</td>
113
- <td>Hog Mountain is one of the most fun and colorful arenas in Clash Royale. It is the arena for players who have reached 3000 trophies. In Clash Royale 3D, you can see Hog Mountain in 3D, with its hills, bridges, balloons, hogs, and fireworks. You can also interact with some of the elements of the arena, such as popping balloons or riding hogs.</td>
114
- </tr>
115
- <tr>
116
- <td>Clashmas Village</td>
117
- <td>Clashmas Village is one of the most festive and special arenas in Clash Royale. It is a seasonal arena that appears during the Christmas period. In Clash Royale 3D, you can see Clashmas Village in 3D, with its snow, trees, lights, presents, and snowmen. You can also enjoy the Christmas music and atmosphere.</td>
118
- </tr>
119
- </table>
120
- <h3>The challenges and rewards of playing Clash Royale 3D</h3>
121
- <p>Clash Royale 3D is not only a game to admire, but also a game to challenge yourself and improve your skills. There are many challenges and rewards that you can face and earn while playing Clash Royale 3D, such as:</p>
122
- <ul>
123
- <li>You can compete with other players online or offline in different modes and formats. You can play classic matches or tournaments with standard rules or custom rules. You can also play special events or modes that have different objectives or conditions.</li>
124
- <li>You can collect cards and build decks that suit your style and strategy. You can unlock new cards by opening chests or buying them from the shop. You can also upgrade your cards by using gold or gems. You can create different decks for different situations or preferences.</li>
125
- <li>You can join clans and chat with other players who share your passion for Clash Royale. You can exchange cards, tips, and ideas with your clanmates. You can also participate in clan wars or clan games to earn rewards and glory for your clan.</li>
126
- <li>You can earn trophies and climb the ladder of rankings. You can gain trophies by winning matches or lose trophies by losing matches. You can also reach new arenas or leagues that have different rewards and challenges.</li>
127
- <li>You can achieve goals and milestones that show your progress and achievements. You can complete quests or missions that have specific tasks or requirements. You can also earn badges or stars that indicate your level or skill.</li>
128
- </ul>
129
- <h2>Conclusion</h2>
130
- <p>Clash Royale 3D is a fan-made project that allows you to play Clash Royale in 3D. It is not an official game by Supercell, but rather a tribute by by visiting this link: [WebGL Report]. However, some devices and platforms may have better performance and compatibility than others, depending on their specifications and features. For example, Android devices may run Clash Royale 3D better than iOS devices, and Chrome browsers may run Clash Royale 3D better than Safari browsers.</p>
131
- <h4>Q4: Is Clash Royale 3D updated regularly?</h4>
132
- <p>A4: Clash Royale 3D is updated regularly. The developers of Clash Royale 3D are constantly working on improving the game and adding new features, fixes, and improvements. You can check for updates by visiting the official website of Clash Royale 3D or by opening the game's settings menu. You can also follow the developers on their social media accounts or join their Discord server to get the latest news and updates about Clash Royale 3D.</p>
133
- <h4>Q5: How can I support the developers of Clash Royale 3D?</h4>
134
- <p>A5: You can support the developers of Clash Royale 3D by doing the following things:</p>
135
- <ul>
136
- <li>Share your feedback and suggestions with them. You can contact them via email, social media, or Discord. You can also rate and review the game on the website or app store.</li>
137
- <li>Spread the word and invite your friends to play Clash Royale 3D. You can share the game's link or screenshots on your social media accounts or chat apps. You can also challenge your friends to play with you online or offline.</li>
138
- <li>Donate or contribute to the game's development. You can donate money or resources to the developers via PayPal or Patreon. You can also contribute your skills or talents to the game, such as creating 3D models, graphics, sounds, or codes.</li>
139
- </ul></p> 197e85843d<br />
140
- <br />
141
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CricHD App APK Live Cricket Streaming at Your Fingertips.md DELETED
@@ -1,128 +0,0 @@
1
- <br />
2
- <h1>Live Cricket Streaming App Download APK: How to Watch Cricket Matches on Your Phone</h1>
3
- <p>If you are a cricket fan, you probably don't want to miss any of the exciting matches happening around the world. Whether it's the ICC World Cup, the Ashes, or the IPL, you want to watch every ball and every run live. But what if you don't have access to a TV or a cable subscription? Or what if you are on the go and can't sit in front of a screen for hours? That's where live cricket streaming app download apk comes in handy.</p>
4
- <h2>live cricket streaming app download apk</h2><br /><p><b><b>Download Zip</b> &#9889; <a href="https://urlin.us/2uSYoX">https://urlin.us/2uSYoX</a></b></p><br /><br />
5
- <p>Live cricket streaming app download apk is a way of downloading and installing an application that allows you to watch live cricket matches on your phone. You don't need to pay any fees or sign up for any subscriptions. You just need to have a stable internet connection and enough storage space on your phone. With live cricket streaming app download apk, you can enjoy watching cricket anytime and anywhere.</p>
6
- <p>But why do you need live cricket streaming app download apk? What are the benefits of using it? Here are some of the reasons why you should try it out:</p>
7
- <ul>
8
- <li>You can watch live cricket matches from different leagues and tournaments, such as the ICC World Cup, the Ashes, the IPL, the BBL, and more.</li>
9
- <li>You can watch live cricket matches from different countries and regions, such as India, Australia, England, Pakistan, South Africa, New Zealand, and more.</li>
10
- <li>You can watch live cricket matches in different languages and commentary options, such as English, Hindi, Urdu, Tamil, Telugu, Bengali, and more.</li>
11
- <li>You can watch live cricket matches in high-definition quality and smooth streaming without any buffering or lagging.</li>
12
- <li>You can watch live cricket matches on your phone screen or cast them to your TV or laptop for a bigger view.</li>
13
- <li>You can watch live cricket matches with interactive features, such as live scores, stats, highlights, replays, polls, chat, and more.</li>
14
- </ul>
15
- <p>So how do you download and use live cricket streaming app download apk? Here are the steps you need to follow:</p>
16
- <h2>How to Download Live Cricket Streaming App APK</h2>
17
- <h3>Step 1: Find a reliable source for the APK file</h3>
18
- <p>An APK file is an Android package file that contains all the necessary files and data for an application to run on your phone. You can find many sources for live cricket streaming app APK files online, but not all of them are safe and trustworthy. Some of them may contain viruses, malware, or spyware that can harm your phone or steal your personal information. Therefore, you need to be careful and choose a reliable source for the APK file.</p>
19
- <p>One way to find a reliable source is to check the reviews and ratings of other users who have downloaded and used the app. You can also look for official websites or social media pages of the app developers or publishers. You can also ask for recommendations from your friends or fellow cricket fans who have used live cricket streaming apps before.</p>
20
- <p>Once you have found a reliable source for the APK file, you need to download it to your phone. You can do this by clicking on the download link or scanning the QR code provided by the source. The download process may take a few minutes depending on the size of the file and the speed of your internet connection.</p>
21
- <p>live cricket match watch online free app apk<br />
22
- download crichd live cricket streaming app for android<br />
23
- watch live cricket tv hd apk app free download<br />
24
- rts tv apk download latest version - live sports and movies app<br />
25
- icc.tv app download - official home of icc cricket video<br />
26
- live cricket streaming app for pc - bluestacks emulator download<br />
27
- watch live cricket online free hd quality app apk<br />
28
- best live cricket streaming app for android 2023 download<br />
29
- live cricket score and news app apk download<br />
30
- free live cricket tv channel app download apk</p>
31
- <h3>Step 2: Enable unknown sources on your phone settings</h3>
32
- <p>Before you can install the APK file on your phone, you need to enable unknown sources on your phone settings. This is because most live cricket streaming apps are not available on the official Google Play Store or Apple App Store, and your phone may block the installation of apps from unknown sources by default.</p>
33
- <p>To enable unknown sources on your phone settings, you need to follow these steps:</p>
34
- <ul>
35
- <li>Go to your phone settings and look for security or privacy options.</li>
36
- <li>Find the option that says unknown sources or allow installation of apps from unknown sources and toggle it on.</li>
37
- <li>You may see a warning message that says installing apps from unknown sources may harm your device or data. Tap on OK or Continue to proceed.</li>
38
- </ul>
39
- <p>Once you have enabled unknown sources on your phone settings, you are ready to install the APK file on your phone.</p>
40
- <h3>Step 3: Install the APK file on your phone</h3>
41
- <p>To install the APK file on your phone, you need to follow these steps:</p>
42
- <ul>
43
- <li>Locate the APK file on your phone storage. You can use a file manager app or search for it in your downloads folder.</li>
44
- <li>Tap on the APK file and you will see a prompt that asks you to confirm the installation. Tap on Install or Next to start the installation process.</li>
45
- <li>The installation process may take a few seconds or minutes depending on the size of the app and the performance of your phone.</li>
46
- <li>Once the installation is complete, you will see a message that says App installed or Done. Tap on Open or Launch to open the app or tap on Done to exit the installer.</li>
47
- </ul>
48
- <p>Congratulations! You have successfully downloaded and installed live cricket streaming app APK on your phone. Now you can start using it to watch live cricket matches on your phone.</p>
49
- <h2>How to Use Live Cricket Streaming App APK</h2>
50
- <h3>Step 1: Open the app and sign up or log in</h3>
51
- <p>Once you have installed live cricket streaming app APK on your phone, you need to open it and sign up or log in. Some apps may require you to create an account or provide some personal information before you can use them. Others may allow you to use them without signing up or logging in. You can choose the option that suits you best.</p>
52
- <p>To sign up or log in, you need to follow these steps:</p>
53
- <ul>
54
- <li>Open the app and look for the sign up or log in option. It may be on the home screen, the menu, or the settings of the app.</li>
55
- <li>If you choose to sign up, you need to provide some basic information, such as your name, email address, password, country, etc. You may also need to verify your email address or phone number by entering a code sent to you by the app.</li>
56
- <li>If you choose to log in, you need to enter your email address and password that you used to sign up. You may also need to enter a captcha code or a verification code sent to you by the app.</li>
57
- <li>Once you have signed up or logged in, you will see your profile or dashboard where you can access different features and options of the app.</li>
58
- </ul>
59
- <h3>Step 2: Choose a live match or a replay from the list</h3>
60
- <p>Now that you have signed up or logged in, you can choose a live match or a replay from the list of available matches. You can find the list of matches on the home screen, the menu, or the categories of the app. You can also use the search function or filter function to find a specific match or league that you want to watch.</p>
61
- <p>To choose a live match or a replay from the list, you need to follow these steps:</p>
62
- <ul>
63
- <li>Browse through the list of matches and look for the one that interests you. You can see some information about each match, such as the teams, the date, the time, the venue, etc.</li>
64
- <li>Tap on the match that you want to watch and you will see more details about it, such as the score, the overs, the wickets, the run rate, etc.</li>
65
- <li>If the match is live, you will see a button that says Watch Live or Live Stream. Tap on it and you will be directed to the live streaming page where you can watch the match in real time.</li>
66
- <li>If the match is not live, you will see a button that says Watch Replay or Replay Stream. Tap on it and you will be directed to the replay streaming page where you can watch the match from the beginning or from any point that you want.</li>
67
- </ul>
68
- <h3>Step 3: Enjoy watching cricket on your phone</h3>
69
- <p>Now that you have chosen a live match or a replay from the list, you can enjoy watching cricket on your phone. You can adjust the video quality, the volume, the brightness, and the orientation of your phone according to your preference. You can also pause, resume, rewind, fast forward, or skip the match as you wish.</p>
70
- <p>While watching cricket on your phone, you can also access some interactive features that enhance your viewing experience. For example, you can:</p>
71
- <ul>
72
- <li>Check the live scores, stats, highlights, replays, polls, chat, and more on the app.</li>
73
- <li>Share your thoughts and opinions with other cricket fans on social media platforms, such as Facebook, Twitter, Instagram, etc.</li>
74
- <li>Cast your phone screen to your TV or laptop for a bigger view using Chromecast, Airplay, Miracast, or other devices.</li>
75
- <li>Use headphones or earphones for a better sound quality and avoid disturbing others.</li>
76
- </ul>
77
- <p>With live cricket streaming app download apk, you can watch cricket matches on your phone anytime and anywhere. You don't need to worry about missing any action or excitement. You can stay updated and entertained with live cricket streaming app download apk.</p>
78
- <h2>Comparison of Different Live Cricket Streaming Apps APK</h2>
79
- <p>There are many live cricket streaming apps APK available online, but not all of them are equally good. Some of them may have better features, quality, and performance than others. Some of them may also have more matches, leagues, and options than others. Therefore, it is important to compare different live cricket streaming apps APK before choosing one.</p>
80
- <p>Here are some of the popular and reliable live cricket streaming apps APK that you can try out:</p>
81
- <h4>ICC.tv</h4>
82
- <p>ICC.tv is the official app of the International Cricket Council (ICC), the governing body of world cricket. It offers live streaming of all ICC events and tournaments, such as the ICC World Cup, the ICC Champions Trophy, the ICC World Test Championship, the ICC Women's World Cup, and more. It also offers live streaming of other domestic and international matches from various countries and regions.</p>
83
- <p>Some of the features of ICC.tv are:</p>
84
- <ul>
85
- <li>It has high-definition quality and smooth streaming without any buffering or lagging.</li>
86
- <li>It has multiple languages and commentary options for different matches.</li>
87
- <li>It has interactive features such as live scores, stats, highlights, replays, polls, chat, and more on the app.</li>
88
- <li>It is free to download and use, but it may require registration or verification for some matches.</li>
89
- </ul>
90
- <p>You can download ICC.tv APK from its official website or from other sources online.</p>
91
- <h4>CricHD</h4>
92
- <p>CricHD is one of the most popular and widely used live cricket streaming apps APK. It offers live streaming of all kinds of cricket matches, such as Test, ODI, T20, IPL, BBL, PSL, CPL, and more. It also offers live streaming of other sports, such as football, basketball, tennis, hockey, rugby, and more.</p>
93
- <p>Some of the features of CricHD are:</p>
94
- <ul>
95
- <li>It has high-definition quality and smooth streaming without any buffering or lagging.</li>
96
- <li>It has multiple languages and commentary options for different matches.</li>
97
- <li>It has interactive features such as live scores, stats, highlights, replays, polls, chat, and more on the app.</li>
98
- <li>It is free to download and use, but it may show some ads or pop-ups during the streaming.</li>
99
- </ul>
100
- <p>You can download CricHD APK from its official website or from other sources online.</p>
101
- <h4>Other options</h4>
102
- <p>There are many other live cricket streaming apps APK that you can try out, such as:</p>
103
- <ul>
104
- <li>Hotstar: It is a popular streaming platform that offers live cricket matches from India and other countries. It also offers other entertainment content, such as movies, TV shows, news, etc. It is free to download and use, but it may require a subscription for some content.</li>
105
- <li>SonyLIV: It is another popular streaming platform that offers live cricket matches from India and other countries. It also offers other sports and entertainment content, such as football, WWE, comedy, etc. It is free to download and use, but it may require a subscription for some content.</li>
106
- <li>ThopTV: It is a third-party streaming app that offers live cricket matches from various sources and channels. It also offers other content, such as movies, TV shows, music, etc. It is free to download and use, but it may not be safe or legal to use.</li>
107
- </ul>
108
- <p>You can compare these and other live cricket streaming apps APK based on your preferences and needs. You can also check the reviews and ratings of other users who have used them before. You can also try out different apps and see which one works best for you.</p>
109
- <h2>Conclusion</h2>
110
- <p>Live cricket streaming app download apk is a great way to watch live cricket matches on your phone. You don't need to have a TV or a cable subscription to enjoy watching cricket. You just need to have a stable internet connection and enough storage space on your phone. You can watch live cricket matches from different leagues, tournaments, countries, regions, languages, and commentary options. You can watch live cricket matches in high-definition quality and smooth streaming without any buffering or lagging. You can watch live cricket matches with interactive features, such as live scores, stats, highlights, replays, polls, chat, and more.</p>
111
- <p>To watch live cricket matches on your phone, you need to download and install live cricket streaming app APK on your phone. You need to find a reliable source for the APK file, enable unknown sources on your phone settings, and install the APK file on your phone. Then you need to open the app and sign up or log in, choose a live match or a replay from the list, and enjoy watching cricket on your phone.</p>
112
- <p>You can also compare different live cricket streaming apps APK based on their features, quality, performance, and options. You can try out some of the popular and reliable apps, such as ICC.tv, CricHD, Hotstar, SonyLIV, ThopTV, and more. You can also check the reviews and ratings of other users who have used them before. You can also try out different apps and see which one works best for you.</p>
113
- <p>Live cricket streaming app download apk is a must-have for any cricket fan who wants to watch live cricket matches on their phone. It is easy to use, convenient, and fun. It is the best way to stay updated and entertained with live cricket.</p>
114
- <p>So what are you waiting for? Download live cricket streaming app APK now and start watching live cricket matches on your phone!</p>
115
- <h2>FAQs</h2>
116
- <p>Here are some of the frequently asked questions about live cricket streaming app download apk:</p>
117
- <h3>Q1: Is live cricket streaming app download apk safe and legal?</h3>
118
- <p>A1: Live cricket streaming app download apk is safe and legal as long as you download it from a reliable source and use it for personal and non-commercial purposes. However, some apps may not have the rights or permissions to stream some matches or content, and they may violate the intellectual property or privacy rights of the owners or providers. Therefore, you should be careful and responsible when using live cricket streaming app download apk.</p>
119
- <h3>Q2: How much data does live cricket streaming app download apk consume?</h3>
120
- <p>A2: Live cricket streaming app download apk consumes data depending on the video quality, the duration, and the frequency of your streaming. Generally, the higher the video quality, the more data it consumes. For example, streaming a match in HD quality may consume about 1 GB of data per hour, while streaming it in SD quality may consume about 300 MB of data per hour. Therefore, you should monitor your data usage and choose a suitable video quality according to your data plan.</p>
121
- <h3>Q3: What are the best live cricket streaming apps for Android and iOS?</h3>
122
- <p>A3: There are many live cricket streaming apps for Android and iOS devices, but some of them may be better than others in terms of features, quality, performance, and options. Some of the best live cricket streaming apps for Android and iOS devices are: - ICC.tv: It is the official app of the International Cricket Council (ICC), the governing body of world cricket. It offers live streaming of all ICC events and tournaments, as well as other domestic and international matches from various countries and regions. It has high-definition quality and smooth streaming without any buffering or lagging. It has multiple languages and commentary options for different matches. It has interactive features such as live scores, stats, highlights, replays, polls, chat, and more on the app. It is free to download and use, but it may require registration or verification for some matches. - CricHD: It is one of the most popular and widely used live cricket streaming apps. It offers live streaming of all kinds of cricket matches, such as Test, ODI, T20, IPL, BBL, PSL, CPL, and more. It also offers live streaming of other sports, such as football, basketball, tennis, hockey, rugby, and more. It has high-definition quality and smooth streaming without any buffering or lagging. It has multiple languages and commentary options for different matches. It has interactive features such as live scores, stats, highlights, replays, polls, chat, and more on the app. It is free to download and use, but it may show some ads or pop-ups during the streaming. - Hotstar: It is a popular streaming platform that offers live cricket matches from India and other countries. It also offers other entertainment content, such as movies, TV shows, news, etc. It has high-definition quality and smooth streaming without any buffering or lagging. It has multiple languages and commentary options for different matches. 
It has interactive features such as live scores, stats, highlights, replays, polls, chat, and more on the app. It is free to download and use, but it may require a subscription for some content. - SonyLIV: It is another popular streaming platform that offers live cricket matches from India and other countries. It also offers other sports and entertainment content, such as football, WWE, comedy, etc. It has high-definition quality and smooth streaming without any buffering or lagging. It has multiple languages and commentary options for different matches. It has interactive features such as live scores, stats, highlights, replays, polls, chat, and more on the app. It is free to download and use, but it may require a subscription for some content. These are some of the best live cricket streaming apps for Android and iOS devices that you can try out. You can also check out other apps that may suit your preferences and needs.</p>
123
- <h3>Q4: How can I watch live cricket streaming on my TV or laptop?</h3>
124
- <p>A4: If you want to watch live cricket streaming on your TV or laptop instead of your phone, you have a few options to do so. For example, you can: - Use a device that supports casting or mirroring, such as Chromecast, Airplay, Miracast, or others. You can connect your phone to your TV or laptop using these devices and stream the live cricket match from your phone to your TV or laptop screen. You may need to download and install an app or a software that supports casting or mirroring on your phone and your TV or laptop. - Use a device that supports HDMI, such as a cable, an adapter, or a dongle. You can connect your phone to your TV or laptop using these devices and stream the live cricket match from your phone to your TV or laptop screen. You may need to adjust the settings on your phone and your TV or laptop to enable HDMI output and input. - Use a device that supports USB, such as a cable, an adapter, or a dongle. You can connect your phone to your TV or laptop using these devices and stream the live cricket match from your phone to your TV or laptop screen. You may need to enable USB debugging on your phone and install a driver or a software on your TV or laptop to recognize your phone. These are some of the ways you can watch live cricket streaming on your TV or laptop using live cricket streaming app download apk. You can also check out other methods that may work for you.</p>
125
- <h3>Q5: How can I improve the quality and speed of live cricket streaming?</h3>
126
- <p>A5: If you want to improve the quality and speed of live cricket streaming, you need to consider some factors that may affect them, such as: - Your internet connection: You need to have a stable and fast internet connection to stream live cricket matches without any buffering or lagging. You can check your internet speed and signal strength using an app or a website. You can also use a Wi-Fi network instead of a mobile data network if possible. You can also close any other apps or programs that may consume bandwidth on your phone or your router. - Your phone storage: You need to have enough storage space on your phone to download and install live cricket streaming app APK and store the data and cache of the app. You can check your phone storage using an app or a setting. You can also delete any unnecessary files or apps that may take up space on your phone. - Your phone performance: You need to have a good phone performance to run live cricket streaming app APK smoothly and efficiently. You can check your phone performance using an app or a setting. You can also clear any background processes or tasks that may slow down your phone. You can also update your phone software and firmware if available. These are some of the factors that may affect the quality and speed of live cricket streaming. You can try to optimize them as much as possible to improve your streaming experience.</p><br />
127
- <br />
128
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Free Download Video Game Player The Ultimate Guide to Playing Any Game on Any Device.md DELETED
@@ -1,107 +0,0 @@
1
-
2
- <h1>How to Find and Download the Best Free Video Game Players</h1>
3
- <p>If you love playing video games, you probably know how important it is to have a good video game player. A video game player is a software that allows you to run and enjoy various types of video games on your device. But how do you find and download the best free video game players? In this article, we will answer this question and give you some tips and recommendations.</p>
4
- <h2>free download video game player</h2><br /><p><b><b>Download File</b> &#10037;&#10037;&#10037; <a href="https://jinyurl.com/2uNTMm">https://jinyurl.com/2uNTMm</a></b></p><br /><br />
5
- <h2>What is a Video Game Player?</h2>
6
- <h3>Definition and Features</h3>
7
- <p>A video game player is a program that can play video games that are stored on your device or streamed from the internet. A video game player can support different formats and genres of games, such as action, adventure, puzzle, simulation, sports, etc. A video game player can also have various features, such as graphics settings, sound options, controller support, online multiplayer, achievements, etc.</p>
8
- <h3>Benefits of Using a Video Game Player</h3>
9
- <p>Using a video game player has many benefits, such as:</p>
10
- <ul>
11
- <li>You can play games without buying or installing them separately.</li>
12
- <li>You can access a large library of games from different sources and platforms.</li>
13
- <li>You can enjoy high-quality graphics and sound effects.</li>
14
- <li>You can customize your gaming experience according to your preferences.</li>
15
- <li>You can save space and money on your device.</li>
16
- </ul>
17
- <h2>How to Choose the Right Video Game Player for Your Needs</h2>
18
- <h3>Compatibility and Performance</h3>
19
- <p>The first thing you need to consider when choosing a video game player is its compatibility and performance. You need to make sure that the video game player can run smoothly on your device and operating system. You also need to check the system requirements and specifications of the games you want to play. You don't want to download a video game player that will crash or lag frequently.</p>
20
- <p>free download VLC media player for PC<br />
21
- free download EA games on console and mobile<br />
22
- free download top games from Microsoft Store<br />
23
- free download Steam game player for Windows<br />
24
- free download Epic Games launcher for PC and Mac<br />
25
- free download GOG Galaxy game player and store<br />
26
- free download Origin game player and library<br />
27
- free download Uplay game player and platform<br />
28
- free download Battle.net game player and service<br />
29
- free download Xbox Game Pass for PC and console<br />
30
- free download PlayStation Now for PC and PS4<br />
31
- free download Nintendo Switch Online for games and cloud<br />
32
- free download Stadia game player and streaming<br />
33
- free download GeForce Now game player and cloud gaming<br />
34
- free download Roblox game player and creator<br />
35
- free download Minecraft game player and server<br />
36
- free download Fortnite game player and battle royale<br />
37
- free download PUBG Mobile game player and emulator<br />
38
- free download Call of Duty Mobile game player and shooter<br />
39
- free download Genshin Impact game player and RPG<br />
40
- free download Among Us game player and social deduction<br />
41
- free download Fall Guys game player and party game<br />
42
- free download Valorant game player and tactical shooter<br />
43
- free download League of Legends game player and MOBA<br />
44
- free download Dota 2 game player and strategy<br />
45
- free download Counter-Strike: Global Offensive game player and FPS<br />
46
- free download Apex Legends game player and hero shooter<br />
47
- free download Overwatch game player and team-based shooter<br />
48
- free download Hearthstone game player and card game<br />
49
- free download World of Warcraft game player and MMORPG<br />
50
- free download Star Wars: The Old Republic game player and online RPG<br />
51
- free download Star Wars: Galaxy of Heroes game player and mobile RPG<br />
52
- free download FIFA Mobile game player and soccer simulator<br />
53
- free download NBA 2K Mobile game player and basketball simulator<br />
54
- free download Madden NFL Mobile game player and football simulator<br />
55
- free download MLB Tap Sports Baseball 2021 game player and baseball simulator<br />
56
- free download Golf Clash game player and golf simulator<br />
57
- free download Asphalt 9: Legends game player and racing simulator<br />
58
- free download Real Racing 3 game player and car simulator<br />
59
- free download Need for Speed No Limits game player and street racing simulator<br />
60
- free download Candy Crush Saga game player and puzzle solver<br />
61
- free download Angry Birds 2 game player and physics-based puzzle solver <br />
62
- free download Plants vs. Zombies 2 game player and tower defense puzzle solver <br />
63
- free download Cut the Rope 2 game player and logic-based puzzle solver <br />
64
- free download Monument Valley 2 game player and optical illusion puzzle solver <br />
65
- free download Limbo game player and dark-themed puzzle solver <br />
66
- free download The Room Two game player and escape room puzzle solver <br />
67
- free download Sudoku.com - Free Sudoku Puzzles game player <br />
68
- free download Words With Friends 2 - Free Word Games & Puzzles</p>
69
- <h3>Variety and Quality of Games</h3>
70
- <p>The second thing you need to consider is the variety and quality of games that the video game player offers. You want to choose a video game player that has a wide range of games from different genres and categories. You also want to choose a video game player that has high-quality games that are fun, engaging, and original. You don't want to download a video game player that has boring or outdated games.</p>
71
- <h3>User Interface and Customization</h3>
72
- <p>The third thing you need to consider is the user interface and customization of the video game player. You want to choose a video game player that has a simple and intuitive user interface that is easy to navigate and use. You also want to choose a video game player that has various options and settings that allow you to customize your gaming experience. You don't want to download a video game player that has a complicated or cluttered user interface.</p>
73
- <h2>Where to Download Free Video Game Players Safely and Legally</h2>
74
- <h3>Official Websites of Developers and Publishers</h3>
75
- <p>One of the best places to download free video game players safely and legally is the official websites of the developers and publishers of the games. These websites usually offer free downloads or trials of their video game players, as well as updates, patches, support, and information. For example, you can download VLC media player from its official website. This is a free and open source cross-platform multimedia player that can play most multimedia files as well as DVDs, Audio CDs, VCDs, and various streaming protocols.</p>
76
- <h3>Trusted <h3>Trusted Platforms and Stores</h3>
77
- <p>Another good place to download free video game players safely and legally is the trusted platforms and stores that offer games and software. These platforms and stores usually have a large collection of free and paid video game players, as well as reviews, ratings, recommendations, and security checks. For example, you can download EA's free-to-play games from its platform Origin. This is a digital distribution platform that allows you to play games from EA and other publishers, as well as access online features, social networking, and cloud saves.</p>
78
- <h3>Tips and Precautions for Downloading</h3>
79
- <p>Before you download any video game player, you should follow some tips and precautions to ensure a safe and legal download. Here are some of them:</p>
80
- <ul>
81
- <li>Always download from official or trusted sources. Avoid downloading from unknown or suspicious websites that may contain malware or viruses.</li>
82
- <li>Always read the terms and conditions, privacy policy, and license agreement of the video game player. Make sure you understand what you are agreeing to and what rights you have.</li>
83
- <li>Always scan the downloaded file with an antivirus or anti-malware program. Make sure the file is clean and does not contain any harmful or unwanted components.</li>
84
- <li>Always backup your device and data before installing the video game player. In case something goes wrong or you don't like the video game player, you can restore your device and data to the previous state.</li>
85
- </ul>
86
- <h2>Some of the Best Free Video Game Players You Can Try Today</h2>
87
- <h3>VLC Media Player</h3>
88
- <p>As mentioned earlier, VLC media player is a free and open source cross-platform multimedia player that can play most multimedia files as well as DVDs, Audio CDs, VCDs, and various streaming protocols. It can also play video games that are stored on your device or streamed from the internet. Some of the video games that VLC media player can play are Doom, Quake, SuperTuxKart, Super Mario Bros., etc. You can find more information on how to play video games with VLC media player here.</p>
89
- <h3>EA's Free-to-Play Games</h3>
90
- <p>EA is one of the biggest video game publishers in the world, and it offers some of its games for free on its platform Origin. Some of the free-to-play games that EA offers are Apex Legends, FIFA Online 4, Star Wars: The Old Republic, Need for Speed World, etc. These games are high-quality and have online multiplayer features. You can find more information on how to download and play EA's free-to-play games here.</p>
91
- <h3>Microsoft's Top Free Games</h3>
92
- <p>Microsoft is another giant in the video game industry, and it also offers some of its games for free on its platform Microsoft Store. Some of the top free games that Microsoft offers are Forza Street, Asphalt 9: Legends, Roblox, Minecraft: Windows 10 Edition, etc. These games are also high-quality and have online multiplayer features. You can find more information on how to download and play Microsoft's top free games here.</p>
93
- <h2>Conclusion</h2>
94
- <p>In conclusion, finding and downloading the best free video game players is not difficult if you know where to look and what to consider. A video game player is a software that allows you to play various types of video games on your device. You should choose a video game player that is compatible with your device and operating system, has a wide variety of high-quality games, and has a simple and customizable user interface. You should also download from official or trusted sources, read the terms and conditions, scan the file with an antivirus program, and backup your device before installing. Some of the best free video game players you can try today are VLC media player, EA's free-to-play games, and Microsoft's top free games.</p>
95
- <h2>FAQs</h2>
96
- <h4>What is the difference between a video game player and a video game emulator?</h4>
97
- <p>A video game player is a software that can play video games that are designed for your device or platform. A video game emulator is a software that can simulate another device or platform and allow you to play video games that are not designed for your device or platform.</p>
98
- <h4>Can I play online multiplayer games with a video game player?</h4>
99
- <p>Yes, you can play online multiplayer games with a video game player if the video game player supports online features and has an internet connection. However, you may need to create an account or register with the developer or publisher of the game to access online multiplayer modes.</p>
100
- <h4>How can I update my video game player?</h4>
101
- <p>You can update your video game player by checking for updates on the official website of the video game player or the platform or store where you downloaded it from. You can also enable automatic updates if the video game player has this option.</p>
102
- <h4>How can I uninstall my video game player?</h4>
103
- <p>You can uninstall your video game player by following the instructions on the official website of the video game player or the platform or store where you downloaded it from. You can also use the uninstaller program that comes with the video game player or use the control panel or settings of your device.</p>
104
- <h4>What are some alternatives to video game players?</h4>
105
- <p>Some alternatives to video game players are video game consoles, handheld devices, streaming services, and web browsers. Video game consoles are dedicated devices that can play video games on a TV or monitor. Handheld devices are portable devices that can play video games on a small screen. Streaming services are online platforms that can stream video games to your device without downloading them. Web browsers are programs that can access and play web-based games on your device.</p> 197e85843d<br />
106
- <br />
107
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1toTree/lora_test/ppdiffusers/schedulers/preconfig/preconfig_scheduling_lms_discrete.py DELETED
@@ -1,299 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 Katherine Crowson and The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- import warnings
16
- from dataclasses import dataclass
17
- from typing import List, Optional, Tuple, Union
18
-
19
- import numpy as np
20
- import paddle
21
- from scipy import integrate
22
-
23
- from ...configuration_utils import ConfigMixin, register_to_config
24
- from ...utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS, BaseOutput
25
- from ..scheduling_utils import SchedulerMixin
26
-
27
-
28
- @dataclass
29
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->LMSDiscrete
30
- class PreconfigLMSDiscreteSchedulerOutput(BaseOutput):
31
- """
32
- Output class for the scheduler's step function output.
33
-
34
- Args:
35
- prev_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
36
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
37
- denoising loop.
38
- pred_original_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
39
- The predicted denoised sample (x_{0}) based on the model output from the current timestep.
40
- `pred_original_sample` can be used to preview progress or for guidance.
41
- """
42
-
43
- prev_sample: paddle.Tensor
44
- pred_original_sample: Optional[paddle.Tensor] = None
45
-
46
-
47
- class PreconfigLMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
48
- """
49
- Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by
50
- Katherine Crowson:
51
- https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181
52
-
53
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
54
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
55
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
56
- [`~SchedulerMixin.from_pretrained`] functions.
57
-
58
- Args:
59
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
60
- beta_start (`float`): the starting `beta` value of inference.
61
- beta_end (`float`): the final `beta` value.
62
- beta_schedule (`str`):
63
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
64
- `linear` or `scaled_linear`.
65
- trained_betas (`np.ndarray`, optional):
66
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
67
- prediction_type (`str`, default `epsilon`, optional):
68
- prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
69
- process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4
70
- https://imagen.research.google/video/paper.pdf)
71
- """
72
-
73
- _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
74
- order = 1
75
-
76
- @register_to_config
77
- def __init__(
78
- self,
79
- num_train_timesteps: int = 1000,
80
- beta_start: float = 0.0001,
81
- beta_end: float = 0.02,
82
- beta_schedule: str = "linear",
83
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
84
- prediction_type: str = "epsilon",
85
- preconfig=True,
86
- ):
87
- if trained_betas is not None:
88
- self.betas = paddle.to_tensor(trained_betas, dtype="float32")
89
- elif beta_schedule == "linear":
90
- self.betas = paddle.linspace(beta_start, beta_end, num_train_timesteps, dtype="float32")
91
- elif beta_schedule == "scaled_linear":
92
- # this schedule is very specific to the latent diffusion model.
93
- self.betas = paddle.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype="float32") ** 2
94
- else:
95
- raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
96
-
97
- self.alphas = 1.0 - self.betas
98
- self.alphas_cumprod = paddle.cumprod(self.alphas, 0)
99
-
100
- sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
101
- sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32)
102
- self.sigmas = paddle.to_tensor(sigmas)
103
-
104
- # standard deviation of the initial noise distribution
105
- self.init_noise_sigma = self.sigmas.max()
106
-
107
- # setable values
108
- self.num_inference_steps = None
109
- timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy()
110
- self.timesteps = paddle.to_tensor(timesteps, dtype="float32")
111
- self.derivatives = []
112
- self.is_scale_input_called = False
113
- self.preconfig = preconfig
114
-
115
- def scale_model_input(
116
- self, sample: paddle.Tensor, timestep: Union[float, paddle.Tensor], **kwargs
117
- ) -> paddle.Tensor:
118
- """
119
- Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm.
120
-
121
- Args:
122
- sample (`paddle.Tensor`): input sample
123
- timestep (`float` or `paddle.Tensor`): the current timestep in the diffusion chain
124
-
125
- Returns:
126
- `paddle.Tensor`: scaled input sample
127
- """
128
- if kwargs.get("step_index") is not None:
129
- step_index = kwargs["step_index"]
130
- else:
131
- step_index = (self.timesteps == timestep).nonzero().item()
132
- self.is_scale_input_called = True
133
- if not self.preconfig:
134
- sigma = self.sigmas[step_index]
135
- sample = sample / ((sigma**2 + 1) ** 0.5)
136
- return sample
137
- else:
138
- return sample * self.latent_scales[step_index]
139
-
140
- def get_lms_coefficient(self, order, t, current_order):
141
- """
142
- Compute a linear multistep coefficient.
143
-
144
- Args:
145
- order (TODO):
146
- t (TODO):
147
- current_order (TODO):
148
- """
149
-
150
- def lms_derivative(tau):
151
- prod = 1.0
152
- for k in range(order):
153
- if current_order == k:
154
- continue
155
- prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k])
156
- return prod
157
-
158
- integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=1e-4)[0]
159
-
160
- return integrated_coeff
161
-
162
- def set_timesteps(self, num_inference_steps: int, preconfig_order: int = 4):
163
- """
164
- Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
165
-
166
- Args:
167
- num_inference_steps (`int`):
168
- the number of diffusion steps used when generating samples with a pre-trained model.
169
- """
170
- self.num_inference_steps = num_inference_steps
171
-
172
- timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
173
- sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
174
- sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
175
- sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
176
- self.sigmas = paddle.to_tensor(sigmas)
177
- self.timesteps = paddle.to_tensor(timesteps, dtype="float32")
178
-
179
- self.derivatives = []
180
- if self.preconfig:
181
- self.order = preconfig_order
182
- self.lms_coeffs = []
183
- self.latent_scales = [1.0 / ((sigma**2 + 1) ** 0.5) for sigma in self.sigmas]
184
- for step_index in range(self.num_inference_steps):
185
- order = min(step_index + 1, preconfig_order)
186
- self.lms_coeffs.append(
187
- [self.get_lms_coefficient(order, step_index, curr_order) for curr_order in range(order)]
188
- )
189
-
190
- def step(
191
- self,
192
- model_output: paddle.Tensor,
193
- timestep: Union[float, paddle.Tensor],
194
- sample: paddle.Tensor,
195
- order: int = 4,
196
- return_dict: bool = True,
197
- **kwargs
198
- ) -> Union[PreconfigLMSDiscreteSchedulerOutput, Tuple]:
199
- """
200
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
201
- process from the learned model outputs (most often the predicted noise).
202
-
203
- Args:
204
- model_output (`paddle.Tensor`): direct output from learned diffusion model.
205
- timestep (`float`): current timestep in the diffusion chain.
206
- sample (`paddle.Tensor`):
207
- current instance of sample being created by diffusion process.
208
- order: coefficient for multi-step inference.
209
- return_dict (`bool`): option for returning tuple rather than PreconfigLMSDiscreteSchedulerOutput class
210
- Args in kwargs:
211
- step_index (`int`):
212
- return_pred_original_sample (`bool`): option for return pred_original_sample
213
-
214
- Returns:
215
- [`~schedulers.scheduling_utils.PreconfigLMSDiscreteSchedulerOutput`] or `tuple`:
216
- [`~schedulers.scheduling_utils.PreconfigLMSDiscreteSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`.
217
- When returning a tuple, the first element is the sample tensor.
218
-
219
- """
220
- if not self.is_scale_input_called:
221
- warnings.warn(
222
- "The `scale_model_input` function should be called before `step` to ensure correct denoising. "
223
- "See `StableDiffusionPipeline` for a usage example."
224
- )
225
- if kwargs.get("return_pred_original_sample") is not None:
226
- return_pred_original_sample = kwargs["return_pred_original_sample"]
227
- else:
228
- return_pred_original_sample = True
229
- if kwargs.get("step_index") is not None:
230
- step_index = kwargs["step_index"]
231
- else:
232
- step_index = (self.timesteps == timestep).nonzero().item()
233
- if self.config.prediction_type == "epsilon" and not return_pred_original_sample:
234
- # if pred_original_sample is no need
235
- self.derivatives.append(model_output)
236
- pred_original_sample = None
237
- else:
238
- sigma = self.sigmas[step_index]
239
- # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
240
- if self.config.prediction_type == "epsilon":
241
- pred_original_sample = sample - sigma * model_output
242
- elif self.config.prediction_type == "v_prediction":
243
- # * c_out + input * c_skip
244
- pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
245
- else:
246
- raise ValueError(
247
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
248
- )
249
- # 2. Convert to an ODE derivative
250
- derivative = (sample - pred_original_sample) / sigma
251
- self.derivatives.append(derivative)
252
-
253
- if len(self.derivatives) > order:
254
- self.derivatives.pop(0)
255
-
256
- if not self.preconfig:
257
- # 3. If not preconfiged, compute linear multistep coefficients.
258
- order = min(step_index + 1, order)
259
- lms_coeffs = [self.get_lms_coefficient(order, step_index, curr_order) for curr_order in range(order)]
260
- # 4. Compute previous sample based on the derivatives path
261
- prev_sample = sample + sum(
262
- coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(self.derivatives))
263
- )
264
- else:
265
- # 3. If preconfiged, direct compute previous sample based on the derivatives path
266
- prev_sample = sample + sum(
267
- coeff * derivative
268
- for coeff, derivative in zip(self.lms_coeffs[step_index], reversed(self.derivatives))
269
- )
270
-
271
- if not return_dict:
272
- if not return_pred_original_sample:
273
- return (prev_sample,)
274
- else:
275
- return (prev_sample, pred_original_sample)
276
-
277
- return PreconfigLMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
278
-
279
- def add_noise(
280
- self,
281
- original_samples: paddle.Tensor,
282
- noise: paddle.Tensor,
283
- timesteps: paddle.Tensor,
284
- ) -> paddle.Tensor:
285
- # Make sure sigmas and timesteps have the same dtype as original_samples
286
- sigmas = self.sigmas.cast(original_samples.dtype)
287
- schedule_timesteps = self.timesteps
288
-
289
- step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
290
-
291
- sigma = sigmas[step_indices].flatten()
292
- while len(sigma.shape) < len(original_samples.shape):
293
- sigma = sigma.unsqueeze(-1)
294
-
295
- noisy_samples = original_samples + noise * sigma
296
- return noisy_samples
297
-
298
- def __len__(self):
299
- return self.config.num_train_timesteps
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ASJMO/freegpt/client/css/checkbox.css DELETED
@@ -1,55 +0,0 @@
1
- .checkbox input {
2
- height: 0;
3
- width: 0;
4
- display: none;
5
- }
6
-
7
- .checkbox span {
8
- font-size: 0.875rem;
9
- color: var(--colour-2);
10
- margin-left: 4px;
11
- }
12
-
13
- .checkbox label:after {
14
- content: "";
15
- position: absolute;
16
- top: 50%;
17
- transform: translateY(-50%);
18
- left: 5px;
19
- width: 20px;
20
- height: 20px;
21
- background: var(--blur-border);
22
- border-radius: 90px;
23
- transition: 0.33s;
24
- }
25
-
26
- .checkbox input + label:after,
27
- .checkbox input:checked + label {
28
- background: var(--colour-3);
29
- }
30
-
31
- .checkbox input + label,
32
- .checkbox input:checked + label:after {
33
- background: var(--blur-border);
34
- }
35
-
36
- .checkbox input:checked + label:after {
37
- left: calc(100% - 5px - 20px);
38
- }
39
-
40
- @media screen and (max-width: 990px) {
41
- .checkbox label {
42
- width: 25px;
43
- height: 15px;
44
- }
45
-
46
- .checkbox label:after {
47
- left: 2px;
48
- width: 10px;
49
- height: 10px;
50
- }
51
-
52
- .checkbox input:checked + label:after {
53
- left: calc(100% - 2px - 10px);
54
- }
55
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/admin/export/+server.ts DELETED
@@ -1,166 +0,0 @@
1
- import {
2
- PARQUET_EXPORT_DATASET,
3
- PARQUET_EXPORT_HF_TOKEN,
4
- PARQUET_EXPORT_SECRET,
5
- } from "$env/static/private";
6
- import { collections } from "$lib/server/database";
7
- import type { Message } from "$lib/types/Message";
8
- import { error } from "@sveltejs/kit";
9
- import { pathToFileURL } from "node:url";
10
- import { unlink } from "node:fs/promises";
11
- import { uploadFile } from "@huggingface/hub";
12
- import parquet from "parquetjs";
13
- import { z } from "zod";
14
-
15
- // Triger like this:
16
- // curl -X POST "http://localhost:5173/chat/admin/export" -H "Authorization: Bearer <PARQUET_EXPORT_SECRET>" -H "Content-Type: application/json" -d '{"model": "OpenAssistant/oasst-sft-6-llama-30b-xor"}'
17
-
18
- export async function POST({ request }) {
19
- if (!PARQUET_EXPORT_SECRET || !PARQUET_EXPORT_DATASET || !PARQUET_EXPORT_HF_TOKEN) {
20
- throw error(500, "Parquet export is not configured.");
21
- }
22
-
23
- if (request.headers.get("Authorization") !== `Bearer ${PARQUET_EXPORT_SECRET}`) {
24
- throw error(403);
25
- }
26
-
27
- const { model } = z
28
- .object({
29
- model: z.string(),
30
- })
31
- .parse(await request.json());
32
-
33
- const schema = new parquet.ParquetSchema({
34
- title: { type: "UTF8" },
35
- created_at: { type: "TIMESTAMP_MILLIS" },
36
- updated_at: { type: "TIMESTAMP_MILLIS" },
37
- messages: {
38
- repeated: true,
39
- fields: {
40
- from: { type: "UTF8" },
41
- content: { type: "UTF8" },
42
- score: { type: "INT_8", optional: true },
43
- },
44
- },
45
- });
46
-
47
- const fileName = `/tmp/conversations-${new Date().toJSON().slice(0, 10)}-${Date.now()}.parquet`;
48
-
49
- const writer = await parquet.ParquetWriter.openFile(schema, fileName);
50
-
51
- let count = 0;
52
- console.log("Exporting conversations for model", model);
53
-
54
- for await (const conversation of collections.settings.aggregate<{
55
- title: string;
56
- created_at: Date;
57
- updated_at: Date;
58
- messages: Message[];
59
- }>([
60
- {
61
- $match: {
62
- shareConversationsWithModelAuthors: true,
63
- sessionId: { $exists: true },
64
- userId: { $exists: false },
65
- },
66
- },
67
- {
68
- $lookup: {
69
- from: "conversations",
70
- localField: "sessionId",
71
- foreignField: "sessionId",
72
- as: "conversations",
73
- pipeline: [{ $match: { model, userId: { $exists: false } } }],
74
- },
75
- },
76
- { $unwind: "$conversations" },
77
- {
78
- $project: {
79
- title: "$conversations.title",
80
- created_at: "$conversations.createdAt",
81
- updated_at: "$conversations.updatedAt",
82
- messages: "$conversations.messages",
83
- },
84
- },
85
- ])) {
86
- await writer.appendRow({
87
- title: conversation.title,
88
- created_at: conversation.created_at,
89
- updated_at: conversation.updated_at,
90
- messages: conversation.messages.map((message: Message) => ({
91
- from: message.from,
92
- content: message.content,
93
- ...(message.score ? { score: message.score } : undefined),
94
- })),
95
- });
96
- ++count;
97
-
98
- if (count % 1_000 === 0) {
99
- console.log("Exported", count, "conversations");
100
- }
101
- }
102
-
103
- console.log("exporting convos with userId");
104
-
105
- for await (const conversation of collections.settings.aggregate<{
106
- title: string;
107
- created_at: Date;
108
- updated_at: Date;
109
- messages: Message[];
110
- }>([
111
- { $match: { shareConversationsWithModelAuthors: true, userId: { $exists: true } } },
112
- {
113
- $lookup: {
114
- from: "conversations",
115
- localField: "userId",
116
- foreignField: "userId",
117
- as: "conversations",
118
- pipeline: [{ $match: { model } }],
119
- },
120
- },
121
- { $unwind: "$conversations" },
122
- {
123
- $project: {
124
- title: "$conversations.title",
125
- created_at: "$conversations.createdAt",
126
- updated_at: "$conversations.updatedAt",
127
- messages: "$conversations.messages",
128
- },
129
- },
130
- ])) {
131
- await writer.appendRow({
132
- title: conversation.title,
133
- created_at: conversation.created_at,
134
- updated_at: conversation.updated_at,
135
- messages: conversation.messages.map((message: Message) => ({
136
- from: message.from,
137
- content: message.content,
138
- ...(message.score ? { score: message.score } : undefined),
139
- })),
140
- });
141
- ++count;
142
-
143
- if (count % 1_000 === 0) {
144
- console.log("Exported", count, "conversations");
145
- }
146
- }
147
-
148
- await writer.close();
149
-
150
- console.log("Uploading", fileName, "to Hugging Face Hub");
151
-
152
- await uploadFile({
153
- file: pathToFileURL(fileName),
154
- credentials: { accessToken: PARQUET_EXPORT_HF_TOKEN },
155
- repo: {
156
- type: "dataset",
157
- name: PARQUET_EXPORT_DATASET,
158
- },
159
- });
160
-
161
- console.log("Upload done");
162
-
163
- await unlink(fileName);
164
-
165
- return new Response();
166
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AdamGustavsson/AnimeganV2Webcam/app.py DELETED
@@ -1,3 +0,0 @@
1
- import gradio as gr
2
-
3
- gr.Interface.load("spaces/akhaliq/AnimeGANv2", inputs="webcam").launch()
 
 
 
 
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/sde_team.py DELETED
@@ -1,72 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from typing import TYPE_CHECKING, List
4
-
5
- from agentverse.message import Message
6
-
7
- from . import selector_registry as SelectorRegistry
8
- from .base import BaseSelector
9
-
10
- import json
11
- import re
12
-
13
- if TYPE_CHECKING:
14
- from agentverse.environments import BaseEnvironment
15
-
16
- def extract(content: str, keyword: str):
17
- result = ""
18
- flag = False
19
- for line in content.split('\n'):
20
- if line.strip().startswith(keyword):
21
- flag = True
22
- continue
23
- if flag:
24
- result += line
25
- result += "\n"
26
- return result
27
-
28
-
29
- @SelectorRegistry.register("sde_team")
30
- class SdeTeamSelector(BaseSelector):
31
- def select_message(self, environment: BaseEnvironment, messages: List[Message]) -> List[Message]:
32
- last_sender = environment.last_messages[0].sender
33
- selected = messages
34
-
35
- if last_sender == "unit_test_generator":
36
- unit_tests = set()
37
- for message in selected:
38
- unit_test = extract(message.content, "<unit test>:")
39
- if unit_test not in unit_tests:
40
- unit_tests.add(extract(message.content, "<unit test>:"))
41
- unit_tests = list(unit_tests)
42
- environment.rule_params["unit_tests"] = str(unit_tests)
43
- new_message = Message(
44
- content="",
45
- sender="unit_test_generator",
46
- receiver=[],
47
- ) # TODO: set the content of the message
48
- selected = [new_message]
49
-
50
- elif last_sender == "code_writer":
51
- cur_code = extract(selected[0].content, "<code>:")
52
- environment.rule_params["code"] = cur_code
53
-
54
- from .code_api import execute_unit_tests
55
- feedback = execute_unit_tests(environment.rule_params["code"], eval(environment.rule_params["unit_tests"]))
56
-
57
- environment.rule_params["feedback"] = feedback
58
- selected[0].content = f"<current code>:\n\n{cur_code}\n\n<unit test feedback>:\n{feedback}"
59
- f_dict = json.loads(feedback)
60
- if f_dict["is_passing"]:
61
- environment.rule_params["end_flag"] = True
62
-
63
- elif last_sender == "code_reviewer":
64
- code_review = selected[0].content
65
- cur_code = environment.rule_params["code"]
66
- selected[0].content = f"<current code>:\n\n{cur_code}\n\n{code_review}"
67
- feedback = environment.rule_params["feedback"]
68
- f_dict = json.loads(feedback)
69
- if f_dict["is_passing"]:
70
- environment.rule_params["end_flag"] = True
71
-
72
- return selected
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/agentverse/memory_manipulator/plan.py DELETED
@@ -1,79 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from logging import getLogger
4
- from typing import List, TYPE_CHECKING
5
-
6
- from . import memory_manipulator_registry
7
- from .base import BaseMemoryManipulator
8
- from ..message import Message
9
-
10
- if TYPE_CHECKING:
11
- from agentverse.memory import VectorStoreMemory
12
- from agentverse.agents.reflection_agent import ReflectionAgent
13
-
14
- logger = getLogger(__file__)
15
-
16
- PLAN_PROMPT = """Now you are act for as an agent named ${agent_name} in a virtual world.
17
- You might need to performing reaction to the observation.
18
- Based on the following information:
19
- (1) The agent's description: ${role_description}
20
- (2) Current time is ${current_time}
21
- (3) Your history memory is ${chat_history}
22
-
23
- Now is ${current_time}. If all plans are expired, you have to plan for\
24
- the next time periods.
25
- Do you need to generate new plans?
26
- If yes, tell me the new plan, including the time period.
27
- If no, just tell me No."""
28
-
29
-
30
- @memory_manipulator_registry.register("plan")
31
- class Plan(BaseMemoryManipulator):
32
- """
33
- Memory manipulator for plan.
34
- """
35
- memory: VectorStoreMemory = None
36
- agent: ReflectionAgent = None # specify ReflectionAgent
37
- # later considering removing current_time to be more general
38
- # and then change to BaseAgent
39
- plan: List[str] = []
40
-
41
- def manipulate_memory(self) -> str:
42
- """
43
- Generate new plans
44
- """
45
- prompt = self._fill_prompt_template()
46
- result = self.agent.llm.generate_response(prompt).content
47
- result = result.strip('.')
48
- logger.info(f"{self.agent.name}'s new plan: {result}")
49
- if result == "No":
50
- return ""
51
- else:
52
- self.plan.append(result)
53
- plan_message = Message(
54
- content=result,
55
- sender=self.agent.name,
56
- receiver={self.agent.name})
57
- self.agent.memory.add_message([plan_message])
58
- return result
59
-
60
-
61
- def _fill_prompt_template(self) -> str:
62
- """Fill the placeholders in the prompt template
63
-
64
- In the conversation agent, three placeholders are supported:
65
- - ${agent_name}: the name of the agent
66
- - ${env_description}: the description of the environment
67
- - ${role_description}: the description of the role of the agent
68
- - ${chat_history}: the chat history of the agent
69
- """
70
- input_arguments = {
71
- "agent_name": self.agent.name,
72
- "role_description": self.agent.role_description,
73
- "chat_history": self.agent.memory.to_string(add_sender_prefix=True),
74
- "current_time": self.agent.current_time,
75
- }
76
- return PLAN_PROMPT.format(**input_arguments)
77
-
78
- def reset(self) -> None:
79
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AhmedM20/Email_Marketing_Content_Generator/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Email Marketing Geneartor
3
- emoji: 🏃
4
- colorFrom: yellow
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.41.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlexWang/lama/bin/paper_runfiles/blur_tests.sh DELETED
@@ -1,37 +0,0 @@
1
- ##!/usr/bin/env bash
2
- #
3
- ## !!! file set to make test_large_30k from the vanilla test_large: configs/test_large_30k.lst
4
- #
5
- ## paths to data are valid for mml7
6
- #PLACES_ROOT="/data/inpainting/Places365"
7
- #OUT_DIR="/data/inpainting/paper_data/Places365_val_test"
8
- #
9
- #source "$(dirname $0)/env.sh"
10
- #
11
- #for datadir in test_large_30k # val_large
12
- #do
13
- # for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512
14
- # do
15
- # "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \
16
- # "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 8
17
- #
18
- # "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats"
19
- # done
20
- #
21
- # for conf in segm_256 segm_512
22
- # do
23
- # "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \
24
- # "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 2
25
- #
26
- # "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats"
27
- # done
28
- #done
29
- #
30
- #IN_DIR="/data/inpainting/paper_data/Places365_val_test/test_large_30k/random_medium_512"
31
- #PRED_DIR="/data/inpainting/predictions/final/images/r.suvorov_2021-03-05_17-08-35_train_ablv2_work_resume_epoch37/random_medium_512"
32
- #BLUR_OUT_DIR="/data/inpainting/predictions/final/blur/images"
33
- #
34
- #for b in 0.1
35
- #
36
- #"$BINDIR/blur_predicts.py" "$BASEDIR/../../configs/eval2.yaml" "$CUR_IN_DIR" "$CUR_OUT_DIR" "$CUR_EVAL_DIR"
37
- #
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlexZou/Deploy_Restoration/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Deploy Restoration
3
- emoji: 👀
4
- colorFrom: indigo
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.9.1
8
- app_file: app.py
9
- pinned: false
10
- license: openrail
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlignmentResearch/tuned-lens/README.md DELETED
@@ -1,9 +0,0 @@
1
- ---
2
- title: Tuned Lens
3
- emoji: 🔎
4
- colorFrom: pink
5
- colorTo: blue
6
- sdk: docker
7
- pinned: false
8
- license: mit
9
- ---
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/multistep_dpm_solver_inverse.md DELETED
@@ -1,22 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Inverse Multistep DPM-Solver (DPMSolverMultistepInverse)
14
-
15
- ## Overview
16
-
17
- This scheduler is the inverted scheduler of [DPM-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps](https://arxiv.org/abs/2206.00927) and [DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models
18
- ](https://arxiv.org/abs/2211.01095) by Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu.
19
- The implementation is mostly based on the DDIM inversion definition of [Null-text Inversion for Editing Real Images using Guided Diffusion Models](https://arxiv.org/pdf/2211.09794.pdf) and the ad-hoc notebook implementation for DiffEdit latent inversion [here](https://github.com/Xiang-cd/DiffEdit-stable-diffusion/blob/main/diffedit.ipynb).
20
-
21
- ## DPMSolverMultistepInverseScheduler
22
- [[autodoc]] DPMSolverMultistepInverseScheduler
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/imagic_stable_diffusion.py DELETED
@@ -1,496 +0,0 @@
1
- """
2
- modeled after the textual_inversion.py / train_dreambooth.py and the work
3
- of justinpinkney here: https://github.com/justinpinkney/stable-diffusion/blob/main/notebooks/imagic.ipynb
4
- """
5
- import inspect
6
- import warnings
7
- from typing import List, Optional, Union
8
-
9
- import numpy as np
10
- import PIL
11
- import torch
12
- import torch.nn.functional as F
13
- from accelerate import Accelerator
14
-
15
- # TODO: remove and import from diffusers.utils when the new version of diffusers is released
16
- from packaging import version
17
- from tqdm.auto import tqdm
18
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
19
-
20
- from diffusers import DiffusionPipeline
21
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
22
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
23
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
24
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
25
- from diffusers.utils import logging
26
-
27
-
28
- if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
29
- PIL_INTERPOLATION = {
30
- "linear": PIL.Image.Resampling.BILINEAR,
31
- "bilinear": PIL.Image.Resampling.BILINEAR,
32
- "bicubic": PIL.Image.Resampling.BICUBIC,
33
- "lanczos": PIL.Image.Resampling.LANCZOS,
34
- "nearest": PIL.Image.Resampling.NEAREST,
35
- }
36
- else:
37
- PIL_INTERPOLATION = {
38
- "linear": PIL.Image.LINEAR,
39
- "bilinear": PIL.Image.BILINEAR,
40
- "bicubic": PIL.Image.BICUBIC,
41
- "lanczos": PIL.Image.LANCZOS,
42
- "nearest": PIL.Image.NEAREST,
43
- }
44
- # ------------------------------------------------------------------------------
45
-
46
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
47
-
48
-
49
- def preprocess(image):
50
- w, h = image.size
51
- w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
52
- image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
53
- image = np.array(image).astype(np.float32) / 255.0
54
- image = image[None].transpose(0, 3, 1, 2)
55
- image = torch.from_numpy(image)
56
- return 2.0 * image - 1.0
57
-
58
-
59
- class ImagicStableDiffusionPipeline(DiffusionPipeline):
60
- r"""
61
- Pipeline for imagic image editing.
62
- See paper here: https://arxiv.org/pdf/2210.09276.pdf
63
-
64
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
65
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
66
- Args:
67
- vae ([`AutoencoderKL`]):
68
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
69
- text_encoder ([`CLIPTextModel`]):
70
- Frozen text-encoder. Stable Diffusion uses the text portion of
71
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
72
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
73
- tokenizer (`CLIPTokenizer`):
74
- Tokenizer of class
75
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
76
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
77
- scheduler ([`SchedulerMixin`]):
78
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
79
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
80
- safety_checker ([`StableDiffusionSafetyChecker`]):
81
- Classification module that estimates whether generated images could be considered offsensive or harmful.
82
- Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
83
- feature_extractor ([`CLIPImageProcessor`]):
84
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
85
- """
86
-
87
- def __init__(
88
- self,
89
- vae: AutoencoderKL,
90
- text_encoder: CLIPTextModel,
91
- tokenizer: CLIPTokenizer,
92
- unet: UNet2DConditionModel,
93
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
94
- safety_checker: StableDiffusionSafetyChecker,
95
- feature_extractor: CLIPImageProcessor,
96
- ):
97
- super().__init__()
98
- self.register_modules(
99
- vae=vae,
100
- text_encoder=text_encoder,
101
- tokenizer=tokenizer,
102
- unet=unet,
103
- scheduler=scheduler,
104
- safety_checker=safety_checker,
105
- feature_extractor=feature_extractor,
106
- )
107
-
108
- def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
109
- r"""
110
- Enable sliced attention computation.
111
- When this option is enabled, the attention module will split the input tensor in slices, to compute attention
112
- in several steps. This is useful to save some memory in exchange for a small speed decrease.
113
- Args:
114
- slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
115
- When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
116
- a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
117
- `attention_head_dim` must be a multiple of `slice_size`.
118
- """
119
- if slice_size == "auto":
120
- # half the attention head size is usually a good trade-off between
121
- # speed and memory
122
- slice_size = self.unet.config.attention_head_dim // 2
123
- self.unet.set_attention_slice(slice_size)
124
-
125
- def disable_attention_slicing(self):
126
- r"""
127
- Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
128
- back to computing attention in one step.
129
- """
130
- # set slice_size = `None` to disable `attention slicing`
131
- self.enable_attention_slicing(None)
132
-
133
- def train(
134
- self,
135
- prompt: Union[str, List[str]],
136
- image: Union[torch.FloatTensor, PIL.Image.Image],
137
- height: Optional[int] = 512,
138
- width: Optional[int] = 512,
139
- generator: Optional[torch.Generator] = None,
140
- embedding_learning_rate: float = 0.001,
141
- diffusion_model_learning_rate: float = 2e-6,
142
- text_embedding_optimization_steps: int = 500,
143
- model_fine_tuning_optimization_steps: int = 1000,
144
- **kwargs,
145
- ):
146
- r"""
147
- Function invoked when calling the pipeline for generation.
148
- Args:
149
- prompt (`str` or `List[str]`):
150
- The prompt or prompts to guide the image generation.
151
- height (`int`, *optional*, defaults to 512):
152
- The height in pixels of the generated image.
153
- width (`int`, *optional*, defaults to 512):
154
- The width in pixels of the generated image.
155
- num_inference_steps (`int`, *optional*, defaults to 50):
156
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
157
- expense of slower inference.
158
- guidance_scale (`float`, *optional*, defaults to 7.5):
159
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
160
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
161
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
162
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
163
- usually at the expense of lower image quality.
164
- eta (`float`, *optional*, defaults to 0.0):
165
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
166
- [`schedulers.DDIMScheduler`], will be ignored for others.
167
- generator (`torch.Generator`, *optional*):
168
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
169
- deterministic.
170
- latents (`torch.FloatTensor`, *optional*):
171
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
172
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
173
- tensor will ge generated by sampling using the supplied random `generator`.
174
- output_type (`str`, *optional*, defaults to `"pil"`):
175
- The output format of the generate image. Choose between
176
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
177
- return_dict (`bool`, *optional*, defaults to `True`):
178
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
179
- plain tuple.
180
- Returns:
181
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
182
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
183
- When returning a tuple, the first element is a list with the generated images, and the second element is a
184
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
185
- (nsfw) content, according to the `safety_checker`.
186
- """
187
- accelerator = Accelerator(
188
- gradient_accumulation_steps=1,
189
- mixed_precision="fp16",
190
- )
191
-
192
- if "torch_device" in kwargs:
193
- device = kwargs.pop("torch_device")
194
- warnings.warn(
195
- "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0."
196
- " Consider using `pipe.to(torch_device)` instead."
197
- )
198
-
199
- if device is None:
200
- device = "cuda" if torch.cuda.is_available() else "cpu"
201
- self.to(device)
202
-
203
- if height % 8 != 0 or width % 8 != 0:
204
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
205
-
206
- # Freeze vae and unet
207
- self.vae.requires_grad_(False)
208
- self.unet.requires_grad_(False)
209
- self.text_encoder.requires_grad_(False)
210
- self.unet.eval()
211
- self.vae.eval()
212
- self.text_encoder.eval()
213
-
214
- if accelerator.is_main_process:
215
- accelerator.init_trackers(
216
- "imagic",
217
- config={
218
- "embedding_learning_rate": embedding_learning_rate,
219
- "text_embedding_optimization_steps": text_embedding_optimization_steps,
220
- },
221
- )
222
-
223
- # get text embeddings for prompt
224
- text_input = self.tokenizer(
225
- prompt,
226
- padding="max_length",
227
- max_length=self.tokenizer.model_max_length,
228
- truncation=True,
229
- return_tensors="pt",
230
- )
231
- text_embeddings = torch.nn.Parameter(
232
- self.text_encoder(text_input.input_ids.to(self.device))[0], requires_grad=True
233
- )
234
- text_embeddings = text_embeddings.detach()
235
- text_embeddings.requires_grad_()
236
- text_embeddings_orig = text_embeddings.clone()
237
-
238
- # Initialize the optimizer
239
- optimizer = torch.optim.Adam(
240
- [text_embeddings], # only optimize the embeddings
241
- lr=embedding_learning_rate,
242
- )
243
-
244
- if isinstance(image, PIL.Image.Image):
245
- image = preprocess(image)
246
-
247
- latents_dtype = text_embeddings.dtype
248
- image = image.to(device=self.device, dtype=latents_dtype)
249
- init_latent_image_dist = self.vae.encode(image).latent_dist
250
- image_latents = init_latent_image_dist.sample(generator=generator)
251
- image_latents = 0.18215 * image_latents
252
-
253
- progress_bar = tqdm(range(text_embedding_optimization_steps), disable=not accelerator.is_local_main_process)
254
- progress_bar.set_description("Steps")
255
-
256
- global_step = 0
257
-
258
- logger.info("First optimizing the text embedding to better reconstruct the init image")
259
- for _ in range(text_embedding_optimization_steps):
260
- with accelerator.accumulate(text_embeddings):
261
- # Sample noise that we'll add to the latents
262
- noise = torch.randn(image_latents.shape).to(image_latents.device)
263
- timesteps = torch.randint(1000, (1,), device=image_latents.device)
264
-
265
- # Add noise to the latents according to the noise magnitude at each timestep
266
- # (this is the forward diffusion process)
267
- noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)
268
-
269
- # Predict the noise residual
270
- noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
271
-
272
- loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
273
- accelerator.backward(loss)
274
-
275
- optimizer.step()
276
- optimizer.zero_grad()
277
-
278
- # Checks if the accelerator has performed an optimization step behind the scenes
279
- if accelerator.sync_gradients:
280
- progress_bar.update(1)
281
- global_step += 1
282
-
283
- logs = {"loss": loss.detach().item()} # , "lr": lr_scheduler.get_last_lr()[0]}
284
- progress_bar.set_postfix(**logs)
285
- accelerator.log(logs, step=global_step)
286
-
287
- accelerator.wait_for_everyone()
288
-
289
- text_embeddings.requires_grad_(False)
290
-
291
- # Now we fine tune the unet to better reconstruct the image
292
- self.unet.requires_grad_(True)
293
- self.unet.train()
294
- optimizer = torch.optim.Adam(
295
- self.unet.parameters(), # only optimize unet
296
- lr=diffusion_model_learning_rate,
297
- )
298
- progress_bar = tqdm(range(model_fine_tuning_optimization_steps), disable=not accelerator.is_local_main_process)
299
-
300
- logger.info("Next fine tuning the entire model to better reconstruct the init image")
301
- for _ in range(model_fine_tuning_optimization_steps):
302
- with accelerator.accumulate(self.unet.parameters()):
303
- # Sample noise that we'll add to the latents
304
- noise = torch.randn(image_latents.shape).to(image_latents.device)
305
- timesteps = torch.randint(1000, (1,), device=image_latents.device)
306
-
307
- # Add noise to the latents according to the noise magnitude at each timestep
308
- # (this is the forward diffusion process)
309
- noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)
310
-
311
- # Predict the noise residual
312
- noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
313
-
314
- loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
315
- accelerator.backward(loss)
316
-
317
- optimizer.step()
318
- optimizer.zero_grad()
319
-
320
- # Checks if the accelerator has performed an optimization step behind the scenes
321
- if accelerator.sync_gradients:
322
- progress_bar.update(1)
323
- global_step += 1
324
-
325
- logs = {"loss": loss.detach().item()} # , "lr": lr_scheduler.get_last_lr()[0]}
326
- progress_bar.set_postfix(**logs)
327
- accelerator.log(logs, step=global_step)
328
-
329
- accelerator.wait_for_everyone()
330
- self.text_embeddings_orig = text_embeddings_orig
331
- self.text_embeddings = text_embeddings
332
-
333
- @torch.no_grad()
334
- def __call__(
335
- self,
336
- alpha: float = 1.2,
337
- height: Optional[int] = 512,
338
- width: Optional[int] = 512,
339
- num_inference_steps: Optional[int] = 50,
340
- generator: Optional[torch.Generator] = None,
341
- output_type: Optional[str] = "pil",
342
- return_dict: bool = True,
343
- guidance_scale: float = 7.5,
344
- eta: float = 0.0,
345
- ):
346
- r"""
347
- Function invoked when calling the pipeline for generation.
348
- Args:
349
- prompt (`str` or `List[str]`):
350
- The prompt or prompts to guide the image generation.
351
- height (`int`, *optional*, defaults to 512):
352
- The height in pixels of the generated image.
353
- width (`int`, *optional*, defaults to 512):
354
- The width in pixels of the generated image.
355
- num_inference_steps (`int`, *optional*, defaults to 50):
356
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
357
- expense of slower inference.
358
- guidance_scale (`float`, *optional*, defaults to 7.5):
359
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
360
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
361
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
362
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
363
- usually at the expense of lower image quality.
364
- eta (`float`, *optional*, defaults to 0.0):
365
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
366
- [`schedulers.DDIMScheduler`], will be ignored for others.
367
- generator (`torch.Generator`, *optional*):
368
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
369
- deterministic.
370
- latents (`torch.FloatTensor`, *optional*):
371
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
372
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
373
- tensor will ge generated by sampling using the supplied random `generator`.
374
- output_type (`str`, *optional*, defaults to `"pil"`):
375
- The output format of the generate image. Choose between
376
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
377
- return_dict (`bool`, *optional*, defaults to `True`):
378
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
379
- plain tuple.
380
- Returns:
381
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
382
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
383
- When returning a tuple, the first element is a list with the generated images, and the second element is a
384
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
385
- (nsfw) content, according to the `safety_checker`.
386
- """
387
- if height % 8 != 0 or width % 8 != 0:
388
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
389
- if self.text_embeddings is None:
390
- raise ValueError("Please run the pipe.train() before trying to generate an image.")
391
- if self.text_embeddings_orig is None:
392
- raise ValueError("Please run the pipe.train() before trying to generate an image.")
393
-
394
- text_embeddings = alpha * self.text_embeddings_orig + (1 - alpha) * self.text_embeddings
395
-
396
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
397
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
398
- # corresponds to doing no classifier free guidance.
399
- do_classifier_free_guidance = guidance_scale > 1.0
400
- # get unconditional embeddings for classifier free guidance
401
- if do_classifier_free_guidance:
402
- uncond_tokens = [""]
403
- max_length = self.tokenizer.model_max_length
404
- uncond_input = self.tokenizer(
405
- uncond_tokens,
406
- padding="max_length",
407
- max_length=max_length,
408
- truncation=True,
409
- return_tensors="pt",
410
- )
411
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
412
-
413
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
414
- seq_len = uncond_embeddings.shape[1]
415
- uncond_embeddings = uncond_embeddings.view(1, seq_len, -1)
416
-
417
- # For classifier free guidance, we need to do two forward passes.
418
- # Here we concatenate the unconditional and text embeddings into a single batch
419
- # to avoid doing two forward passes
420
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
421
-
422
- # get the initial random noise unless the user supplied it
423
-
424
- # Unlike in other pipelines, latents need to be generated in the target device
425
- # for 1-to-1 results reproducibility with the CompVis implementation.
426
- # However this currently doesn't work in `mps`.
427
- latents_shape = (1, self.unet.config.in_channels, height // 8, width // 8)
428
- latents_dtype = text_embeddings.dtype
429
- if self.device.type == "mps":
430
- # randn does not exist on mps
431
- latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
432
- self.device
433
- )
434
- else:
435
- latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
436
-
437
- # set timesteps
438
- self.scheduler.set_timesteps(num_inference_steps)
439
-
440
- # Some schedulers like PNDM have timesteps as arrays
441
- # It's more optimized to move all timesteps to correct device beforehand
442
- timesteps_tensor = self.scheduler.timesteps.to(self.device)
443
-
444
- # scale the initial noise by the standard deviation required by the scheduler
445
- latents = latents * self.scheduler.init_noise_sigma
446
-
447
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
448
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
449
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
450
- # and should be between [0, 1]
451
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
452
- extra_step_kwargs = {}
453
- if accepts_eta:
454
- extra_step_kwargs["eta"] = eta
455
-
456
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
457
- # expand the latents if we are doing classifier free guidance
458
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
459
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
460
-
461
- # predict the noise residual
462
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
463
-
464
- # perform guidance
465
- if do_classifier_free_guidance:
466
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
467
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
468
-
469
- # compute the previous noisy sample x_t -> x_t-1
470
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
471
-
472
- latents = 1 / 0.18215 * latents
473
- image = self.vae.decode(latents).sample
474
-
475
- image = (image / 2 + 0.5).clamp(0, 1)
476
-
477
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
478
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
479
-
480
- if self.safety_checker is not None:
481
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
482
- self.device
483
- )
484
- image, has_nsfw_concept = self.safety_checker(
485
- images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
486
- )
487
- else:
488
- has_nsfw_concept = None
489
-
490
- if output_type == "pil":
491
- image = self.numpy_to_pil(image)
492
-
493
- if not return_dict:
494
- return (image, has_nsfw_concept)
495
-
496
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/onnxruntime/unconditional_image_generation/README.md DELETED
@@ -1,50 +0,0 @@
1
- ## Training examples
2
-
3
- Creating a training image set is [described in a different document](https://huggingface.co/docs/datasets/image_process#image-datasets).
4
-
5
- ### Installing the dependencies
6
-
7
- Before running the scripts, make sure to install the library's training dependencies:
8
-
9
- **Important**
10
-
11
- To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
12
- ```bash
13
- git clone https://github.com/huggingface/diffusers
14
- cd diffusers
15
- pip install .
16
- ```
17
-
18
- Then cd in the example folder and run
19
- ```bash
20
- pip install -r requirements.txt
21
- ```
22
-
23
-
24
- And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
25
-
26
- ```bash
27
- accelerate config
28
- ```
29
-
30
- #### Use ONNXRuntime to accelerate training
31
-
32
- In order to leverage onnxruntime to accelerate training, please use train_unconditional_ort.py
33
-
34
- The command to train a DDPM UNet model on the Oxford Flowers dataset with onnxruntime:
35
-
36
- ```bash
37
- accelerate launch train_unconditional.py \
38
- --dataset_name="huggan/flowers-102-categories" \
39
- --resolution=64 --center_crop --random_flip \
40
- --output_dir="ddpm-ema-flowers-64" \
41
- --use_ema \
42
- --train_batch_size=16 \
43
- --num_epochs=1 \
44
- --gradient_accumulation_steps=1 \
45
- --learning_rate=1e-4 \
46
- --lr_warmup_steps=500 \
47
- --mixed_precision=fp16
48
- ```
49
-
50
- Please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on github with any questions.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_repaint.py DELETED
@@ -1,344 +0,0 @@
1
- # Copyright 2023 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import math
16
- from dataclasses import dataclass
17
- from typing import Optional, Tuple, Union
18
-
19
- import numpy as np
20
- import torch
21
-
22
- from ..configuration_utils import ConfigMixin, register_to_config
23
- from ..utils import BaseOutput, randn_tensor
24
- from .scheduling_utils import SchedulerMixin
25
-
26
-
27
- @dataclass
28
- class RePaintSchedulerOutput(BaseOutput):
29
- """
30
- Output class for the scheduler's step function output.
31
-
32
- Args:
33
- prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
34
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
35
- denoising loop.
36
- pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
37
- The predicted denoised sample (x_{0}) based on the model output from
38
- the current timestep. `pred_original_sample` can be used to preview progress or for guidance.
39
- """
40
-
41
- prev_sample: torch.FloatTensor
42
- pred_original_sample: torch.FloatTensor
43
-
44
-
45
- # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
46
- def betas_for_alpha_bar(
47
- num_diffusion_timesteps,
48
- max_beta=0.999,
49
- alpha_transform_type="cosine",
50
- ):
51
- """
52
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
53
- (1-beta) over time from t = [0,1].
54
-
55
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
56
- to that part of the diffusion process.
57
-
58
-
59
- Args:
60
- num_diffusion_timesteps (`int`): the number of betas to produce.
61
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
62
- prevent singularities.
63
- alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
64
- Choose from `cosine` or `exp`
65
-
66
- Returns:
67
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
68
- """
69
- if alpha_transform_type == "cosine":
70
-
71
- def alpha_bar_fn(t):
72
- return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
73
-
74
- elif alpha_transform_type == "exp":
75
-
76
- def alpha_bar_fn(t):
77
- return math.exp(t * -12.0)
78
-
79
- else:
80
- raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
81
-
82
- betas = []
83
- for i in range(num_diffusion_timesteps):
84
- t1 = i / num_diffusion_timesteps
85
- t2 = (i + 1) / num_diffusion_timesteps
86
- betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
87
- return torch.tensor(betas, dtype=torch.float32)
88
-
89
-
90
- class RePaintScheduler(SchedulerMixin, ConfigMixin):
91
- """
92
- RePaint is a schedule for DDPM inpainting inside a given mask.
93
-
94
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
95
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
96
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
97
- [`~SchedulerMixin.from_pretrained`] functions.
98
-
99
- For more details, see the original paper: https://arxiv.org/pdf/2201.09865.pdf
100
-
101
- Args:
102
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
103
- beta_start (`float`): the starting `beta` value of inference.
104
- beta_end (`float`): the final `beta` value.
105
- beta_schedule (`str`):
106
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
107
- `linear`, `scaled_linear`, `squaredcos_cap_v2` or `sigmoid`.
108
- eta (`float`):
109
- The weight of noise for added noise in a diffusion step. Its value is between 0.0 and 1.0 -0.0 is DDIM and
110
- 1.0 is DDPM scheduler respectively.
111
- trained_betas (`np.ndarray`, optional):
112
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
113
- variance_type (`str`):
114
- options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
115
- `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
116
- clip_sample (`bool`, default `True`):
117
- option to clip predicted sample between -1 and 1 for numerical stability.
118
-
119
- """
120
-
121
- order = 1
122
-
123
- @register_to_config
124
- def __init__(
125
- self,
126
- num_train_timesteps: int = 1000,
127
- beta_start: float = 0.0001,
128
- beta_end: float = 0.02,
129
- beta_schedule: str = "linear",
130
- eta: float = 0.0,
131
- trained_betas: Optional[np.ndarray] = None,
132
- clip_sample: bool = True,
133
- ):
134
- if trained_betas is not None:
135
- self.betas = torch.from_numpy(trained_betas)
136
- elif beta_schedule == "linear":
137
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
138
- elif beta_schedule == "scaled_linear":
139
- # this schedule is very specific to the latent diffusion model.
140
- self.betas = (
141
- torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
142
- )
143
- elif beta_schedule == "squaredcos_cap_v2":
144
- # Glide cosine schedule
145
- self.betas = betas_for_alpha_bar(num_train_timesteps)
146
- elif beta_schedule == "sigmoid":
147
- # GeoDiff sigmoid schedule
148
- betas = torch.linspace(-6, 6, num_train_timesteps)
149
- self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
150
- else:
151
- raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
152
-
153
- self.alphas = 1.0 - self.betas
154
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
155
- self.one = torch.tensor(1.0)
156
-
157
- self.final_alpha_cumprod = torch.tensor(1.0)
158
-
159
- # standard deviation of the initial noise distribution
160
- self.init_noise_sigma = 1.0
161
-
162
- # setable values
163
- self.num_inference_steps = None
164
- self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
165
-
166
- self.eta = eta
167
-
168
- def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
169
- """
170
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
171
- current timestep.
172
-
173
- Args:
174
- sample (`torch.FloatTensor`): input sample
175
- timestep (`int`, optional): current timestep
176
-
177
- Returns:
178
- `torch.FloatTensor`: scaled input sample
179
- """
180
- return sample
181
-
182
- def set_timesteps(
183
- self,
184
- num_inference_steps: int,
185
- jump_length: int = 10,
186
- jump_n_sample: int = 10,
187
- device: Union[str, torch.device] = None,
188
- ):
189
- num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps)
190
- self.num_inference_steps = num_inference_steps
191
-
192
- timesteps = []
193
-
194
- jumps = {}
195
- for j in range(0, num_inference_steps - jump_length, jump_length):
196
- jumps[j] = jump_n_sample - 1
197
-
198
- t = num_inference_steps
199
- while t >= 1:
200
- t = t - 1
201
- timesteps.append(t)
202
-
203
- if jumps.get(t, 0) > 0:
204
- jumps[t] = jumps[t] - 1
205
- for _ in range(jump_length):
206
- t = t + 1
207
- timesteps.append(t)
208
-
209
- timesteps = np.array(timesteps) * (self.config.num_train_timesteps // self.num_inference_steps)
210
- self.timesteps = torch.from_numpy(timesteps).to(device)
211
-
212
- def _get_variance(self, t):
213
- prev_timestep = t - self.config.num_train_timesteps // self.num_inference_steps
214
-
215
- alpha_prod_t = self.alphas_cumprod[t]
216
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
217
- beta_prod_t = 1 - alpha_prod_t
218
- beta_prod_t_prev = 1 - alpha_prod_t_prev
219
-
220
- # For t > 0, compute predicted variance βt (see formula (6) and (7) from
221
- # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get
222
- # previous sample x_{t-1} ~ N(pred_prev_sample, variance) == add
223
- # variance to pred_sample
224
- # Is equivalent to formula (16) in https://arxiv.org/pdf/2010.02502.pdf
225
- # without eta.
226
- # variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t]
227
- variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
228
-
229
- return variance
230
-
231
- def step(
232
- self,
233
- model_output: torch.FloatTensor,
234
- timestep: int,
235
- sample: torch.FloatTensor,
236
- original_image: torch.FloatTensor,
237
- mask: torch.FloatTensor,
238
- generator: Optional[torch.Generator] = None,
239
- return_dict: bool = True,
240
- ) -> Union[RePaintSchedulerOutput, Tuple]:
241
- """
242
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
243
- process from the learned model outputs (most often the predicted noise).
244
-
245
- Args:
246
- model_output (`torch.FloatTensor`): direct output from learned
247
- diffusion model.
248
- timestep (`int`): current discrete timestep in the diffusion chain.
249
- sample (`torch.FloatTensor`):
250
- current instance of sample being created by diffusion process.
251
- original_image (`torch.FloatTensor`):
252
- the original image to inpaint on.
253
- mask (`torch.FloatTensor`):
254
- the mask where 0.0 values define which part of the original image to inpaint (change).
255
- generator (`torch.Generator`, *optional*): random number generator.
256
- return_dict (`bool`): option for returning tuple rather than
257
- DDPMSchedulerOutput class
258
-
259
- Returns:
260
- [`~schedulers.scheduling_utils.RePaintSchedulerOutput`] or `tuple`:
261
- [`~schedulers.scheduling_utils.RePaintSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
262
- returning a tuple, the first element is the sample tensor.
263
-
264
- """
265
- t = timestep
266
- prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
267
-
268
- # 1. compute alphas, betas
269
- alpha_prod_t = self.alphas_cumprod[t]
270
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
271
- beta_prod_t = 1 - alpha_prod_t
272
-
273
- # 2. compute predicted original sample from predicted noise also called
274
- # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
275
- pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
276
-
277
- # 3. Clip "predicted x_0"
278
- if self.config.clip_sample:
279
- pred_original_sample = torch.clamp(pred_original_sample, -1, 1)
280
-
281
- # We choose to follow RePaint Algorithm 1 to get x_{t-1}, however we
282
- # substitute formula (7) in the algorithm coming from DDPM paper
283
- # (formula (4) Algorithm 2 - Sampling) with formula (12) from DDIM paper.
284
- # DDIM schedule gives the same results as DDPM with eta = 1.0
285
- # Noise is being reused in 7. and 8., but no impact on quality has
286
- # been observed.
287
-
288
- # 5. Add noise
289
- device = model_output.device
290
- noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype)
291
- std_dev_t = self.eta * self._get_variance(timestep) ** 0.5
292
-
293
- variance = 0
294
- if t > 0 and self.eta > 0:
295
- variance = std_dev_t * noise
296
-
297
- # 6. compute "direction pointing to x_t" of formula (12)
298
- # from https://arxiv.org/pdf/2010.02502.pdf
299
- pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
300
-
301
- # 7. compute x_{t-1} of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
302
- prev_unknown_part = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction + variance
303
-
304
- # 8. Algorithm 1 Line 5 https://arxiv.org/pdf/2201.09865.pdf
305
- prev_known_part = (alpha_prod_t_prev**0.5) * original_image + ((1 - alpha_prod_t_prev) ** 0.5) * noise
306
-
307
- # 9. Algorithm 1 Line 8 https://arxiv.org/pdf/2201.09865.pdf
308
- pred_prev_sample = mask * prev_known_part + (1.0 - mask) * prev_unknown_part
309
-
310
- if not return_dict:
311
- return (
312
- pred_prev_sample,
313
- pred_original_sample,
314
- )
315
-
316
- return RePaintSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
317
-
318
- def undo_step(self, sample, timestep, generator=None):
319
- n = self.config.num_train_timesteps // self.num_inference_steps
320
-
321
- for i in range(n):
322
- beta = self.betas[timestep + i]
323
- if sample.device.type == "mps":
324
- # randn does not work reproducibly on mps
325
- noise = randn_tensor(sample.shape, dtype=sample.dtype, generator=generator)
326
- noise = noise.to(sample.device)
327
- else:
328
- noise = randn_tensor(sample.shape, generator=generator, device=sample.device, dtype=sample.dtype)
329
-
330
- # 10. Algorithm 1 Line 10 https://arxiv.org/pdf/2201.09865.pdf
331
- sample = (1 - beta) ** 0.5 * sample + beta**0.5 * noise
332
-
333
- return sample
334
-
335
- def add_noise(
336
- self,
337
- original_samples: torch.FloatTensor,
338
- noise: torch.FloatTensor,
339
- timesteps: torch.IntTensor,
340
- ) -> torch.FloatTensor:
341
- raise NotImplementedError("Use `DDPMScheduler.add_noise()` to train for sampling with RePaint.")
342
-
343
- def __len__(self):
344
- return self.config.num_train_timesteps
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py DELETED
@@ -1,86 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/cascade_rcnn_r50_fpn.py',
3
- '../_base_/datasets/coco_detection.py',
4
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
5
- ]
6
- # model settings
7
- model = dict(
8
- roi_head=dict(bbox_head=[
9
- dict(
10
- type='SABLHead',
11
- num_classes=80,
12
- cls_in_channels=256,
13
- reg_in_channels=256,
14
- roi_feat_size=7,
15
- reg_feat_up_ratio=2,
16
- reg_pre_kernel=3,
17
- reg_post_kernel=3,
18
- reg_pre_num=2,
19
- reg_post_num=1,
20
- cls_out_channels=1024,
21
- reg_offset_out_channels=256,
22
- reg_cls_out_channels=256,
23
- num_cls_fcs=1,
24
- num_reg_fcs=0,
25
- reg_class_agnostic=True,
26
- norm_cfg=None,
27
- bbox_coder=dict(
28
- type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
29
- loss_cls=dict(
30
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
31
- loss_bbox_cls=dict(
32
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
33
- loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
34
- loss_weight=1.0)),
35
- dict(
36
- type='SABLHead',
37
- num_classes=80,
38
- cls_in_channels=256,
39
- reg_in_channels=256,
40
- roi_feat_size=7,
41
- reg_feat_up_ratio=2,
42
- reg_pre_kernel=3,
43
- reg_post_kernel=3,
44
- reg_pre_num=2,
45
- reg_post_num=1,
46
- cls_out_channels=1024,
47
- reg_offset_out_channels=256,
48
- reg_cls_out_channels=256,
49
- num_cls_fcs=1,
50
- num_reg_fcs=0,
51
- reg_class_agnostic=True,
52
- norm_cfg=None,
53
- bbox_coder=dict(
54
- type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5),
55
- loss_cls=dict(
56
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
57
- loss_bbox_cls=dict(
58
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
59
- loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
60
- loss_weight=1.0)),
61
- dict(
62
- type='SABLHead',
63
- num_classes=80,
64
- cls_in_channels=256,
65
- reg_in_channels=256,
66
- roi_feat_size=7,
67
- reg_feat_up_ratio=2,
68
- reg_pre_kernel=3,
69
- reg_post_kernel=3,
70
- reg_pre_num=2,
71
- reg_post_num=1,
72
- cls_out_channels=1024,
73
- reg_offset_out_channels=256,
74
- reg_cls_out_channels=256,
75
- num_cls_fcs=1,
76
- num_reg_fcs=0,
77
- reg_class_agnostic=True,
78
- norm_cfg=None,
79
- bbox_coder=dict(
80
- type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3),
81
- loss_cls=dict(
82
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
83
- loss_bbox_cls=dict(
84
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
85
- loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0))
86
- ]))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/exp/cascade_mask_rcnn_3x_ms_hybrid_small/config.py DELETED
@@ -1,142 +0,0 @@
1
- _base_ = [
2
- '../../configs/_base_/models/cascade_mask_rcnn_uniformer_fpn.py',
3
- '../../configs/_base_/datasets/coco_instance.py',
4
- '../../configs/_base_/schedules/schedule_1x.py',
5
- '../../configs/_base_/default_runtime.py'
6
- ]
7
-
8
- model = dict(
9
- backbone=dict(
10
- embed_dim=[64, 128, 320, 512],
11
- layers=[3, 4, 8, 3],
12
- head_dim=64,
13
- drop_path_rate=0.2,
14
- use_checkpoint=True,
15
- checkpoint_num=[0, 0, 8, 0],
16
- windows=False,
17
- hybrid=True,
18
- window_size=14
19
- ),
20
- neck=dict(in_channels=[64, 128, 320, 512]),
21
- roi_head=dict(
22
- bbox_head=[
23
- dict(
24
- type='ConvFCBBoxHead',
25
- num_shared_convs=4,
26
- num_shared_fcs=1,
27
- in_channels=256,
28
- conv_out_channels=256,
29
- fc_out_channels=1024,
30
- roi_feat_size=7,
31
- num_classes=80,
32
- bbox_coder=dict(
33
- type='DeltaXYWHBBoxCoder',
34
- target_means=[0., 0., 0., 0.],
35
- target_stds=[0.1, 0.1, 0.2, 0.2]),
36
- reg_class_agnostic=False,
37
- reg_decoded_bbox=True,
38
- norm_cfg=dict(type='SyncBN', requires_grad=True),
39
- loss_cls=dict(
40
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
41
- loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
42
- dict(
43
- type='ConvFCBBoxHead',
44
- num_shared_convs=4,
45
- num_shared_fcs=1,
46
- in_channels=256,
47
- conv_out_channels=256,
48
- fc_out_channels=1024,
49
- roi_feat_size=7,
50
- num_classes=80,
51
- bbox_coder=dict(
52
- type='DeltaXYWHBBoxCoder',
53
- target_means=[0., 0., 0., 0.],
54
- target_stds=[0.05, 0.05, 0.1, 0.1]),
55
- reg_class_agnostic=False,
56
- reg_decoded_bbox=True,
57
- norm_cfg=dict(type='SyncBN', requires_grad=True),
58
- loss_cls=dict(
59
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
60
- loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
61
- dict(
62
- type='ConvFCBBoxHead',
63
- num_shared_convs=4,
64
- num_shared_fcs=1,
65
- in_channels=256,
66
- conv_out_channels=256,
67
- fc_out_channels=1024,
68
- roi_feat_size=7,
69
- num_classes=80,
70
- bbox_coder=dict(
71
- type='DeltaXYWHBBoxCoder',
72
- target_means=[0., 0., 0., 0.],
73
- target_stds=[0.033, 0.033, 0.067, 0.067]),
74
- reg_class_agnostic=False,
75
- reg_decoded_bbox=True,
76
- norm_cfg=dict(type='SyncBN', requires_grad=True),
77
- loss_cls=dict(
78
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
79
- loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
80
- ]))
81
-
82
- img_norm_cfg = dict(
83
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
84
-
85
- # augmentation strategy originates from DETR / Sparse RCNN
86
- train_pipeline = [
87
- dict(type='LoadImageFromFile'),
88
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
89
- dict(type='RandomFlip', flip_ratio=0.5),
90
- dict(type='AutoAugment',
91
- policies=[
92
- [
93
- dict(type='Resize',
94
- img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
95
- (608, 1333), (640, 1333), (672, 1333), (704, 1333),
96
- (736, 1333), (768, 1333), (800, 1333)],
97
- multiscale_mode='value',
98
- keep_ratio=True)
99
- ],
100
- [
101
- dict(type='Resize',
102
- img_scale=[(400, 1333), (500, 1333), (600, 1333)],
103
- multiscale_mode='value',
104
- keep_ratio=True),
105
- dict(type='RandomCrop',
106
- crop_type='absolute_range',
107
- crop_size=(384, 600),
108
- allow_negative_crop=True),
109
- dict(type='Resize',
110
- img_scale=[(480, 1333), (512, 1333), (544, 1333),
111
- (576, 1333), (608, 1333), (640, 1333),
112
- (672, 1333), (704, 1333), (736, 1333),
113
- (768, 1333), (800, 1333)],
114
- multiscale_mode='value',
115
- override=True,
116
- keep_ratio=True)
117
- ]
118
- ]),
119
- dict(type='Normalize', **img_norm_cfg),
120
- dict(type='Pad', size_divisor=32),
121
- dict(type='DefaultFormatBundle'),
122
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
123
- ]
124
- data = dict(train=dict(pipeline=train_pipeline))
125
-
126
- optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
127
- paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
128
- 'relative_position_bias_table': dict(decay_mult=0.),
129
- 'norm': dict(decay_mult=0.)}))
130
- lr_config = dict(step=[27, 33])
131
- runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
132
-
133
- # do not use mmdet version fp16
134
- fp16 = None
135
- optimizer_config = dict(
136
- type="DistOptimizerHook",
137
- update_interval=1,
138
- grad_clip=None,
139
- coalesce=True,
140
- bucket_size_mb=-1,
141
- use_fp16=True,
142
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py DELETED
@@ -1,83 +0,0 @@
1
- from abc import ABCMeta, abstractmethod
2
-
3
- import torch
4
- import torch.nn as nn
5
- from mmcv import ops
6
-
7
-
8
- class BaseRoIExtractor(nn.Module, metaclass=ABCMeta):
9
- """Base class for RoI extractor.
10
-
11
- Args:
12
- roi_layer (dict): Specify RoI layer type and arguments.
13
- out_channels (int): Output channels of RoI layers.
14
- featmap_strides (List[int]): Strides of input feature maps.
15
- """
16
-
17
- def __init__(self, roi_layer, out_channels, featmap_strides):
18
- super(BaseRoIExtractor, self).__init__()
19
- self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
20
- self.out_channels = out_channels
21
- self.featmap_strides = featmap_strides
22
- self.fp16_enabled = False
23
-
24
- @property
25
- def num_inputs(self):
26
- """int: Number of input feature maps."""
27
- return len(self.featmap_strides)
28
-
29
- def init_weights(self):
30
- pass
31
-
32
- def build_roi_layers(self, layer_cfg, featmap_strides):
33
- """Build RoI operator to extract feature from each level feature map.
34
-
35
- Args:
36
- layer_cfg (dict): Dictionary to construct and config RoI layer
37
- operation. Options are modules under ``mmcv/ops`` such as
38
- ``RoIAlign``.
39
- featmap_strides (List[int]): The stride of input feature map w.r.t
40
- to the original image size, which would be used to scale RoI
41
- coordinate (original image coordinate system) to feature
42
- coordinate system.
43
-
44
- Returns:
45
- nn.ModuleList: The RoI extractor modules for each level feature
46
- map.
47
- """
48
-
49
- cfg = layer_cfg.copy()
50
- layer_type = cfg.pop('type')
51
- assert hasattr(ops, layer_type)
52
- layer_cls = getattr(ops, layer_type)
53
- roi_layers = nn.ModuleList(
54
- [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
55
- return roi_layers
56
-
57
- def roi_rescale(self, rois, scale_factor):
58
- """Scale RoI coordinates by scale factor.
59
-
60
- Args:
61
- rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
62
- scale_factor (float): Scale factor that RoI will be multiplied by.
63
-
64
- Returns:
65
- torch.Tensor: Scaled RoI.
66
- """
67
-
68
- cx = (rois[:, 1] + rois[:, 3]) * 0.5
69
- cy = (rois[:, 2] + rois[:, 4]) * 0.5
70
- w = rois[:, 3] - rois[:, 1]
71
- h = rois[:, 4] - rois[:, 2]
72
- new_w = w * scale_factor
73
- new_h = h * scale_factor
74
- x1 = cx - new_w * 0.5
75
- x2 = cx + new_w * 0.5
76
- y1 = cy - new_h * 0.5
77
- y2 = cy + new_h * 0.5
78
- new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
79
- return new_rois
80
-
81
- @abstractmethod
82
- def forward(self, feats, rois, roi_scale_factor=None):
83
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/default_runtime.py DELETED
@@ -1,14 +0,0 @@
1
- # yapf:disable
2
- log_config = dict(
3
- interval=50,
4
- hooks=[
5
- dict(type='TextLoggerHook', by_epoch=False),
6
- # dict(type='TensorboardLoggerHook')
7
- ])
8
- # yapf:enable
9
- dist_params = dict(backend='nccl')
10
- log_level = 'INFO'
11
- load_from = None
12
- resume_from = None
13
- workflow = [('train', 1)]
14
- cudnn_benchmark = True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/iter_timer.py DELETED
@@ -1,18 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import time
3
-
4
- from .hook import HOOKS, Hook
5
-
6
-
7
- @HOOKS.register_module()
8
- class IterTimerHook(Hook):
9
-
10
- def before_epoch(self, runner):
11
- self.t = time.time()
12
-
13
- def before_iter(self, runner):
14
- runner.log_buffer.update({'data_time': time.time() - self.t})
15
-
16
- def after_iter(self, runner):
17
- runner.log_buffer.update({'time': time.time() - self.t})
18
- self.t = time.time()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/gradio_annotator.py DELETED
@@ -1,160 +0,0 @@
1
- import gradio as gr
2
-
3
- from annotator.util import resize_image, HWC3
4
-
5
-
6
- model_canny = None
7
-
8
-
9
- def canny(img, res, l, h):
10
- img = resize_image(HWC3(img), res)
11
- global model_canny
12
- if model_canny is None:
13
- from annotator.canny import CannyDetector
14
- model_canny = CannyDetector()
15
- result = model_canny(img, l, h)
16
- return [result]
17
-
18
-
19
- model_hed = None
20
-
21
-
22
- def hed(img, res):
23
- img = resize_image(HWC3(img), res)
24
- global model_hed
25
- if model_hed is None:
26
- from annotator.hed import HEDdetector
27
- model_hed = HEDdetector()
28
- result = model_hed(img)
29
- return [result]
30
-
31
-
32
- model_mlsd = None
33
-
34
-
35
- def mlsd(img, res, thr_v, thr_d):
36
- img = resize_image(HWC3(img), res)
37
- global model_mlsd
38
- if model_mlsd is None:
39
- from annotator.mlsd import MLSDdetector
40
- model_mlsd = MLSDdetector()
41
- result = model_mlsd(img, thr_v, thr_d)
42
- return [result]
43
-
44
-
45
- model_midas = None
46
-
47
-
48
- def midas(img, res, a):
49
- img = resize_image(HWC3(img), res)
50
- global model_midas
51
- if model_midas is None:
52
- from annotator.midas import MidasDetector
53
- model_midas = MidasDetector()
54
- results = model_midas(img, a)
55
- return results
56
-
57
-
58
- model_openpose = None
59
-
60
-
61
- def openpose(img, res, has_hand):
62
- img = resize_image(HWC3(img), res)
63
- global model_openpose
64
- if model_openpose is None:
65
- from annotator.openpose import OpenposeDetector
66
- model_openpose = OpenposeDetector()
67
- result, _ = model_openpose(img, has_hand)
68
- return [result]
69
-
70
-
71
- model_uniformer = None
72
-
73
-
74
- def uniformer(img, res):
75
- img = resize_image(HWC3(img), res)
76
- global model_uniformer
77
- if model_uniformer is None:
78
- from annotator.uniformer import UniformerDetector
79
- model_uniformer = UniformerDetector()
80
- result = model_uniformer(img)
81
- return [result]
82
-
83
-
84
- block = gr.Blocks().queue()
85
- with block:
86
- with gr.Row():
87
- gr.Markdown("## Canny Edge")
88
- with gr.Row():
89
- with gr.Column():
90
- input_image = gr.Image(source='upload', type="numpy")
91
- low_threshold = gr.Slider(label="low_threshold", minimum=1, maximum=255, value=100, step=1)
92
- high_threshold = gr.Slider(label="high_threshold", minimum=1, maximum=255, value=200, step=1)
93
- resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
94
- run_button = gr.Button(label="Run")
95
- with gr.Column():
96
- gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
97
- run_button.click(fn=canny, inputs=[input_image, resolution, low_threshold, high_threshold], outputs=[gallery])
98
-
99
- with gr.Row():
100
- gr.Markdown("## HED Edge")
101
- with gr.Row():
102
- with gr.Column():
103
- input_image = gr.Image(source='upload', type="numpy")
104
- resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
105
- run_button = gr.Button(label="Run")
106
- with gr.Column():
107
- gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
108
- run_button.click(fn=hed, inputs=[input_image, resolution], outputs=[gallery])
109
-
110
- with gr.Row():
111
- gr.Markdown("## MLSD Edge")
112
- with gr.Row():
113
- with gr.Column():
114
- input_image = gr.Image(source='upload', type="numpy")
115
- value_threshold = gr.Slider(label="value_threshold", minimum=0.01, maximum=2.0, value=0.1, step=0.01)
116
- distance_threshold = gr.Slider(label="distance_threshold", minimum=0.01, maximum=20.0, value=0.1, step=0.01)
117
- resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=384, step=64)
118
- run_button = gr.Button(label="Run")
119
- with gr.Column():
120
- gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
121
- run_button.click(fn=mlsd, inputs=[input_image, resolution, value_threshold, distance_threshold], outputs=[gallery])
122
-
123
- with gr.Row():
124
- gr.Markdown("## MIDAS Depth and Normal")
125
- with gr.Row():
126
- with gr.Column():
127
- input_image = gr.Image(source='upload', type="numpy")
128
- alpha = gr.Slider(label="alpha", minimum=0.1, maximum=20.0, value=6.2, step=0.01)
129
- resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=384, step=64)
130
- run_button = gr.Button(label="Run")
131
- with gr.Column():
132
- gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
133
- run_button.click(fn=midas, inputs=[input_image, resolution, alpha], outputs=[gallery])
134
-
135
- with gr.Row():
136
- gr.Markdown("## Openpose")
137
- with gr.Row():
138
- with gr.Column():
139
- input_image = gr.Image(source='upload', type="numpy")
140
- hand = gr.Checkbox(label='detect hand', value=False)
141
- resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
142
- run_button = gr.Button(label="Run")
143
- with gr.Column():
144
- gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
145
- run_button.click(fn=openpose, inputs=[input_image, resolution, hand], outputs=[gallery])
146
-
147
-
148
- with gr.Row():
149
- gr.Markdown("## Uniformer Segmentation")
150
- with gr.Row():
151
- with gr.Column():
152
- input_image = gr.Image(source='upload', type="numpy")
153
- resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
154
- run_button = gr.Button(label="Run")
155
- with gr.Column():
156
- gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
157
- run_button.click(fn=uniformer, inputs=[input_image, resolution], outputs=[gallery])
158
-
159
-
160
- block.launch(server_name='0.0.0.0')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Armandoliv/document_parser/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Document Parser
3
- emoji: 📈
4
- colorFrom: blue
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.2
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Arnx/MusicGenXvAKN/tests/utils/__init__.py DELETED
@@ -1,5 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
 
 
 
 
 
 
spaces/Artrajz/vits-simple-api/vits/text/english.py DELETED
@@ -1,188 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
-
3
- '''
4
- Cleaners are transformations that run over the input text at both training and eval time.
5
-
6
- Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
7
- hyperparameter. Some cleaners are English-specific. You'll typically want to use:
8
- 1. "english_cleaners" for English text
9
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
10
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
11
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
12
- the symbols in symbols.py to match your data).
13
- '''
14
-
15
-
16
- # Regular expression matching whitespace:
17
-
18
-
19
- import re
20
- import inflect
21
- from unidecode import unidecode
22
- import eng_to_ipa as ipa
23
- _inflect = inflect.engine()
24
- _comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
25
- _decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
26
- _pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
27
- _dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
28
- _ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
29
- _number_re = re.compile(r'[0-9]+')
30
-
31
- # List of (regular expression, replacement) pairs for abbreviations:
32
- _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
33
- ('mrs', 'misess'),
34
- ('mr', 'mister'),
35
- ('dr', 'doctor'),
36
- ('st', 'saint'),
37
- ('co', 'company'),
38
- ('jr', 'junior'),
39
- ('maj', 'major'),
40
- ('gen', 'general'),
41
- ('drs', 'doctors'),
42
- ('rev', 'reverend'),
43
- ('lt', 'lieutenant'),
44
- ('hon', 'honorable'),
45
- ('sgt', 'sergeant'),
46
- ('capt', 'captain'),
47
- ('esq', 'esquire'),
48
- ('ltd', 'limited'),
49
- ('col', 'colonel'),
50
- ('ft', 'fort'),
51
- ]]
52
-
53
-
54
- # List of (ipa, lazy ipa) pairs:
55
- _lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
56
- ('r', 'ɹ'),
57
- ('æ', 'e'),
58
- ('ɑ', 'a'),
59
- ('ɔ', 'o'),
60
- ('ð', 'z'),
61
- ('θ', 's'),
62
- ('ɛ', 'e'),
63
- ('ɪ', 'i'),
64
- ('ʊ', 'u'),
65
- ('ʒ', 'ʥ'),
66
- ('ʤ', 'ʥ'),
67
- ('ˈ', '↓'),
68
- ]]
69
-
70
- # List of (ipa, lazy ipa2) pairs:
71
- _lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
72
- ('r', 'ɹ'),
73
- ('ð', 'z'),
74
- ('θ', 's'),
75
- ('ʒ', 'ʑ'),
76
- ('ʤ', 'dʑ'),
77
- ('ˈ', '↓'),
78
- ]]
79
-
80
- # List of (ipa, ipa2) pairs
81
- _ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
82
- ('r', 'ɹ'),
83
- ('ʤ', 'dʒ'),
84
- ('ʧ', 'tʃ')
85
- ]]
86
-
87
-
88
- def expand_abbreviations(text):
89
- for regex, replacement in _abbreviations:
90
- text = re.sub(regex, replacement, text)
91
- return text
92
-
93
-
94
- def collapse_whitespace(text):
95
- return re.sub(r'\s+', ' ', text)
96
-
97
-
98
- def _remove_commas(m):
99
- return m.group(1).replace(',', '')
100
-
101
-
102
- def _expand_decimal_point(m):
103
- return m.group(1).replace('.', ' point ')
104
-
105
-
106
- def _expand_dollars(m):
107
- match = m.group(1)
108
- parts = match.split('.')
109
- if len(parts) > 2:
110
- return match + ' dollars' # Unexpected format
111
- dollars = int(parts[0]) if parts[0] else 0
112
- cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
113
- if dollars and cents:
114
- dollar_unit = 'dollar' if dollars == 1 else 'dollars'
115
- cent_unit = 'cent' if cents == 1 else 'cents'
116
- return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
117
- elif dollars:
118
- dollar_unit = 'dollar' if dollars == 1 else 'dollars'
119
- return '%s %s' % (dollars, dollar_unit)
120
- elif cents:
121
- cent_unit = 'cent' if cents == 1 else 'cents'
122
- return '%s %s' % (cents, cent_unit)
123
- else:
124
- return 'zero dollars'
125
-
126
-
127
- def _expand_ordinal(m):
128
- return _inflect.number_to_words(m.group(0))
129
-
130
-
131
- def _expand_number(m):
132
- num = int(m.group(0))
133
- if num > 1000 and num < 3000:
134
- if num == 2000:
135
- return 'two thousand'
136
- elif num > 2000 and num < 2010:
137
- return 'two thousand ' + _inflect.number_to_words(num % 100)
138
- elif num % 100 == 0:
139
- return _inflect.number_to_words(num // 100) + ' hundred'
140
- else:
141
- return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
142
- else:
143
- return _inflect.number_to_words(num, andword='')
144
-
145
-
146
- def normalize_numbers(text):
147
- text = re.sub(_comma_number_re, _remove_commas, text)
148
- text = re.sub(_pounds_re, r'\1 pounds', text)
149
- text = re.sub(_dollars_re, _expand_dollars, text)
150
- text = re.sub(_decimal_number_re, _expand_decimal_point, text)
151
- text = re.sub(_ordinal_re, _expand_ordinal, text)
152
- text = re.sub(_number_re, _expand_number, text)
153
- return text
154
-
155
-
156
- def mark_dark_l(text):
157
- return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text)
158
-
159
-
160
- def english_to_ipa(text):
161
- text = unidecode(text).lower()
162
- text = expand_abbreviations(text)
163
- text = normalize_numbers(text)
164
- phonemes = ipa.convert(text)
165
- phonemes = collapse_whitespace(phonemes)
166
- return phonemes
167
-
168
-
169
- def english_to_lazy_ipa(text):
170
- text = english_to_ipa(text)
171
- for regex, replacement in _lazy_ipa:
172
- text = re.sub(regex, replacement, text)
173
- return text
174
-
175
-
176
- def english_to_ipa2(text):
177
- text = english_to_ipa(text)
178
- text = mark_dark_l(text)
179
- for regex, replacement in _ipa_to_ipa2:
180
- text = re.sub(regex, replacement, text)
181
- return text.replace('...', '…')
182
-
183
-
184
- def english_to_lazy_ipa2(text):
185
- text = english_to_ipa(text)
186
- for regex, replacement in _lazy_ipa2:
187
- text = re.sub(regex, replacement, text)
188
- return text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/subprocess.py DELETED
@@ -1,260 +0,0 @@
1
- import logging
2
- import os
3
- import shlex
4
- import subprocess
5
- from typing import (
6
- TYPE_CHECKING,
7
- Any,
8
- Callable,
9
- Iterable,
10
- List,
11
- Mapping,
12
- Optional,
13
- Union,
14
- )
15
-
16
- from pip._vendor.rich.markup import escape
17
-
18
- from pip._internal.cli.spinners import SpinnerInterface, open_spinner
19
- from pip._internal.exceptions import InstallationSubprocessError
20
- from pip._internal.utils.logging import VERBOSE, subprocess_logger
21
- from pip._internal.utils.misc import HiddenText
22
-
23
- if TYPE_CHECKING:
24
- # Literal was introduced in Python 3.8.
25
- #
26
- # TODO: Remove `if TYPE_CHECKING` when dropping support for Python 3.7.
27
- from typing import Literal
28
-
29
- CommandArgs = List[Union[str, HiddenText]]
30
-
31
-
32
- def make_command(*args: Union[str, HiddenText, CommandArgs]) -> CommandArgs:
33
- """
34
- Create a CommandArgs object.
35
- """
36
- command_args: CommandArgs = []
37
- for arg in args:
38
- # Check for list instead of CommandArgs since CommandArgs is
39
- # only known during type-checking.
40
- if isinstance(arg, list):
41
- command_args.extend(arg)
42
- else:
43
- # Otherwise, arg is str or HiddenText.
44
- command_args.append(arg)
45
-
46
- return command_args
47
-
48
-
49
- def format_command_args(args: Union[List[str], CommandArgs]) -> str:
50
- """
51
- Format command arguments for display.
52
- """
53
- # For HiddenText arguments, display the redacted form by calling str().
54
- # Also, we don't apply str() to arguments that aren't HiddenText since
55
- # this can trigger a UnicodeDecodeError in Python 2 if the argument
56
- # has type unicode and includes a non-ascii character. (The type
57
- # checker doesn't ensure the annotations are correct in all cases.)
58
- return " ".join(
59
- shlex.quote(str(arg)) if isinstance(arg, HiddenText) else shlex.quote(arg)
60
- for arg in args
61
- )
62
-
63
-
64
- def reveal_command_args(args: Union[List[str], CommandArgs]) -> List[str]:
65
- """
66
- Return the arguments in their raw, unredacted form.
67
- """
68
- return [arg.secret if isinstance(arg, HiddenText) else arg for arg in args]
69
-
70
-
71
- def call_subprocess(
72
- cmd: Union[List[str], CommandArgs],
73
- show_stdout: bool = False,
74
- cwd: Optional[str] = None,
75
- on_returncode: 'Literal["raise", "warn", "ignore"]' = "raise",
76
- extra_ok_returncodes: Optional[Iterable[int]] = None,
77
- extra_environ: Optional[Mapping[str, Any]] = None,
78
- unset_environ: Optional[Iterable[str]] = None,
79
- spinner: Optional[SpinnerInterface] = None,
80
- log_failed_cmd: Optional[bool] = True,
81
- stdout_only: Optional[bool] = False,
82
- *,
83
- command_desc: str,
84
- ) -> str:
85
- """
86
- Args:
87
- show_stdout: if true, use INFO to log the subprocess's stderr and
88
- stdout streams. Otherwise, use DEBUG. Defaults to False.
89
- extra_ok_returncodes: an iterable of integer return codes that are
90
- acceptable, in addition to 0. Defaults to None, which means [].
91
- unset_environ: an iterable of environment variable names to unset
92
- prior to calling subprocess.Popen().
93
- log_failed_cmd: if false, failed commands are not logged, only raised.
94
- stdout_only: if true, return only stdout, else return both. When true,
95
- logging of both stdout and stderr occurs when the subprocess has
96
- terminated, else logging occurs as subprocess output is produced.
97
- """
98
- if extra_ok_returncodes is None:
99
- extra_ok_returncodes = []
100
- if unset_environ is None:
101
- unset_environ = []
102
- # Most places in pip use show_stdout=False. What this means is--
103
- #
104
- # - We connect the child's output (combined stderr and stdout) to a
105
- # single pipe, which we read.
106
- # - We log this output to stderr at DEBUG level as it is received.
107
- # - If DEBUG logging isn't enabled (e.g. if --verbose logging wasn't
108
- # requested), then we show a spinner so the user can still see the
109
- # subprocess is in progress.
110
- # - If the subprocess exits with an error, we log the output to stderr
111
- # at ERROR level if it hasn't already been displayed to the console
112
- # (e.g. if --verbose logging wasn't enabled). This way we don't log
113
- # the output to the console twice.
114
- #
115
- # If show_stdout=True, then the above is still done, but with DEBUG
116
- # replaced by INFO.
117
- if show_stdout:
118
- # Then log the subprocess output at INFO level.
119
- log_subprocess: Callable[..., None] = subprocess_logger.info
120
- used_level = logging.INFO
121
- else:
122
- # Then log the subprocess output using VERBOSE. This also ensures
123
- # it will be logged to the log file (aka user_log), if enabled.
124
- log_subprocess = subprocess_logger.verbose
125
- used_level = VERBOSE
126
-
127
- # Whether the subprocess will be visible in the console.
128
- showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level
129
-
130
- # Only use the spinner if we're not showing the subprocess output
131
- # and we have a spinner.
132
- use_spinner = not showing_subprocess and spinner is not None
133
-
134
- log_subprocess("Running command %s", command_desc)
135
- env = os.environ.copy()
136
- if extra_environ:
137
- env.update(extra_environ)
138
- for name in unset_environ:
139
- env.pop(name, None)
140
- try:
141
- proc = subprocess.Popen(
142
- # Convert HiddenText objects to the underlying str.
143
- reveal_command_args(cmd),
144
- stdin=subprocess.PIPE,
145
- stdout=subprocess.PIPE,
146
- stderr=subprocess.STDOUT if not stdout_only else subprocess.PIPE,
147
- cwd=cwd,
148
- env=env,
149
- errors="backslashreplace",
150
- )
151
- except Exception as exc:
152
- if log_failed_cmd:
153
- subprocess_logger.critical(
154
- "Error %s while executing command %s",
155
- exc,
156
- command_desc,
157
- )
158
- raise
159
- all_output = []
160
- if not stdout_only:
161
- assert proc.stdout
162
- assert proc.stdin
163
- proc.stdin.close()
164
- # In this mode, stdout and stderr are in the same pipe.
165
- while True:
166
- line: str = proc.stdout.readline()
167
- if not line:
168
- break
169
- line = line.rstrip()
170
- all_output.append(line + "\n")
171
-
172
- # Show the line immediately.
173
- log_subprocess(line)
174
- # Update the spinner.
175
- if use_spinner:
176
- assert spinner
177
- spinner.spin()
178
- try:
179
- proc.wait()
180
- finally:
181
- if proc.stdout:
182
- proc.stdout.close()
183
- output = "".join(all_output)
184
- else:
185
- # In this mode, stdout and stderr are in different pipes.
186
- # We must use communicate() which is the only safe way to read both.
187
- out, err = proc.communicate()
188
- # log line by line to preserve pip log indenting
189
- for out_line in out.splitlines():
190
- log_subprocess(out_line)
191
- all_output.append(out)
192
- for err_line in err.splitlines():
193
- log_subprocess(err_line)
194
- all_output.append(err)
195
- output = out
196
-
197
- proc_had_error = proc.returncode and proc.returncode not in extra_ok_returncodes
198
- if use_spinner:
199
- assert spinner
200
- if proc_had_error:
201
- spinner.finish("error")
202
- else:
203
- spinner.finish("done")
204
- if proc_had_error:
205
- if on_returncode == "raise":
206
- error = InstallationSubprocessError(
207
- command_description=command_desc,
208
- exit_code=proc.returncode,
209
- output_lines=all_output if not showing_subprocess else None,
210
- )
211
- if log_failed_cmd:
212
- subprocess_logger.error("[present-rich] %s", error)
213
- subprocess_logger.verbose(
214
- "[bold magenta]full command[/]: [blue]%s[/]",
215
- escape(format_command_args(cmd)),
216
- extra={"markup": True},
217
- )
218
- subprocess_logger.verbose(
219
- "[bold magenta]cwd[/]: %s",
220
- escape(cwd or "[inherit]"),
221
- extra={"markup": True},
222
- )
223
-
224
- raise error
225
- elif on_returncode == "warn":
226
- subprocess_logger.warning(
227
- 'Command "%s" had error code %s in %s',
228
- command_desc,
229
- proc.returncode,
230
- cwd,
231
- )
232
- elif on_returncode == "ignore":
233
- pass
234
- else:
235
- raise ValueError(f"Invalid value: on_returncode={on_returncode!r}")
236
- return output
237
-
238
-
239
- def runner_with_spinner_message(message: str) -> Callable[..., None]:
240
- """Provide a subprocess_runner that shows a spinner message.
241
-
242
- Intended for use with for BuildBackendHookCaller. Thus, the runner has
243
- an API that matches what's expected by BuildBackendHookCaller.subprocess_runner.
244
- """
245
-
246
- def runner(
247
- cmd: List[str],
248
- cwd: Optional[str] = None,
249
- extra_environ: Optional[Mapping[str, Any]] = None,
250
- ) -> None:
251
- with open_spinner(message) as spinner:
252
- call_subprocess(
253
- cmd,
254
- command_desc=message,
255
- cwd=cwd,
256
- extra_environ=extra_environ,
257
- spinner=spinner,
258
- )
259
-
260
- return runner
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/vcs/versioncontrol.py DELETED
@@ -1,705 +0,0 @@
1
- """Handles all VCS (version control) support"""
2
-
3
- import logging
4
- import os
5
- import shutil
6
- import sys
7
- import urllib.parse
8
- from typing import (
9
- TYPE_CHECKING,
10
- Any,
11
- Dict,
12
- Iterable,
13
- Iterator,
14
- List,
15
- Mapping,
16
- Optional,
17
- Tuple,
18
- Type,
19
- Union,
20
- )
21
-
22
- from pip._internal.cli.spinners import SpinnerInterface
23
- from pip._internal.exceptions import BadCommand, InstallationError
24
- from pip._internal.utils.misc import (
25
- HiddenText,
26
- ask_path_exists,
27
- backup_dir,
28
- display_path,
29
- hide_url,
30
- hide_value,
31
- is_installable_dir,
32
- rmtree,
33
- )
34
- from pip._internal.utils.subprocess import (
35
- CommandArgs,
36
- call_subprocess,
37
- format_command_args,
38
- make_command,
39
- )
40
- from pip._internal.utils.urls import get_url_scheme
41
-
42
- if TYPE_CHECKING:
43
- # Literal was introduced in Python 3.8.
44
- #
45
- # TODO: Remove `if TYPE_CHECKING` when dropping support for Python 3.7.
46
- from typing import Literal
47
-
48
-
49
- __all__ = ["vcs"]
50
-
51
-
52
- logger = logging.getLogger(__name__)
53
-
54
- AuthInfo = Tuple[Optional[str], Optional[str]]
55
-
56
-
57
- def is_url(name: str) -> bool:
58
- """
59
- Return true if the name looks like a URL.
60
- """
61
- scheme = get_url_scheme(name)
62
- if scheme is None:
63
- return False
64
- return scheme in ["http", "https", "file", "ftp"] + vcs.all_schemes
65
-
66
-
67
- def make_vcs_requirement_url(
68
- repo_url: str, rev: str, project_name: str, subdir: Optional[str] = None
69
- ) -> str:
70
- """
71
- Return the URL for a VCS requirement.
72
-
73
- Args:
74
- repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+").
75
- project_name: the (unescaped) project name.
76
- """
77
- egg_project_name = project_name.replace("-", "_")
78
- req = f"{repo_url}@{rev}#egg={egg_project_name}"
79
- if subdir:
80
- req += f"&subdirectory={subdir}"
81
-
82
- return req
83
-
84
-
85
- def find_path_to_project_root_from_repo_root(
86
- location: str, repo_root: str
87
- ) -> Optional[str]:
88
- """
89
- Find the the Python project's root by searching up the filesystem from
90
- `location`. Return the path to project root relative to `repo_root`.
91
- Return None if the project root is `repo_root`, or cannot be found.
92
- """
93
- # find project root.
94
- orig_location = location
95
- while not is_installable_dir(location):
96
- last_location = location
97
- location = os.path.dirname(location)
98
- if location == last_location:
99
- # We've traversed up to the root of the filesystem without
100
- # finding a Python project.
101
- logger.warning(
102
- "Could not find a Python project for directory %s (tried all "
103
- "parent directories)",
104
- orig_location,
105
- )
106
- return None
107
-
108
- if os.path.samefile(repo_root, location):
109
- return None
110
-
111
- return os.path.relpath(location, repo_root)
112
-
113
-
114
- class RemoteNotFoundError(Exception):
115
- pass
116
-
117
-
118
- class RemoteNotValidError(Exception):
119
- def __init__(self, url: str):
120
- super().__init__(url)
121
- self.url = url
122
-
123
-
124
- class RevOptions:
125
-
126
- """
127
- Encapsulates a VCS-specific revision to install, along with any VCS
128
- install options.
129
-
130
- Instances of this class should be treated as if immutable.
131
- """
132
-
133
- def __init__(
134
- self,
135
- vc_class: Type["VersionControl"],
136
- rev: Optional[str] = None,
137
- extra_args: Optional[CommandArgs] = None,
138
- ) -> None:
139
- """
140
- Args:
141
- vc_class: a VersionControl subclass.
142
- rev: the name of the revision to install.
143
- extra_args: a list of extra options.
144
- """
145
- if extra_args is None:
146
- extra_args = []
147
-
148
- self.extra_args = extra_args
149
- self.rev = rev
150
- self.vc_class = vc_class
151
- self.branch_name: Optional[str] = None
152
-
153
- def __repr__(self) -> str:
154
- return f"<RevOptions {self.vc_class.name}: rev={self.rev!r}>"
155
-
156
- @property
157
- def arg_rev(self) -> Optional[str]:
158
- if self.rev is None:
159
- return self.vc_class.default_arg_rev
160
-
161
- return self.rev
162
-
163
- def to_args(self) -> CommandArgs:
164
- """
165
- Return the VCS-specific command arguments.
166
- """
167
- args: CommandArgs = []
168
- rev = self.arg_rev
169
- if rev is not None:
170
- args += self.vc_class.get_base_rev_args(rev)
171
- args += self.extra_args
172
-
173
- return args
174
-
175
- def to_display(self) -> str:
176
- if not self.rev:
177
- return ""
178
-
179
- return f" (to revision {self.rev})"
180
-
181
- def make_new(self, rev: str) -> "RevOptions":
182
- """
183
- Make a copy of the current instance, but with a new rev.
184
-
185
- Args:
186
- rev: the name of the revision for the new object.
187
- """
188
- return self.vc_class.make_rev_options(rev, extra_args=self.extra_args)
189
-
190
-
191
- class VcsSupport:
192
- _registry: Dict[str, "VersionControl"] = {}
193
- schemes = ["ssh", "git", "hg", "bzr", "sftp", "svn"]
194
-
195
- def __init__(self) -> None:
196
- # Register more schemes with urlparse for various version control
197
- # systems
198
- urllib.parse.uses_netloc.extend(self.schemes)
199
- super().__init__()
200
-
201
- def __iter__(self) -> Iterator[str]:
202
- return self._registry.__iter__()
203
-
204
- @property
205
- def backends(self) -> List["VersionControl"]:
206
- return list(self._registry.values())
207
-
208
- @property
209
- def dirnames(self) -> List[str]:
210
- return [backend.dirname for backend in self.backends]
211
-
212
- @property
213
- def all_schemes(self) -> List[str]:
214
- schemes: List[str] = []
215
- for backend in self.backends:
216
- schemes.extend(backend.schemes)
217
- return schemes
218
-
219
- def register(self, cls: Type["VersionControl"]) -> None:
220
- if not hasattr(cls, "name"):
221
- logger.warning("Cannot register VCS %s", cls.__name__)
222
- return
223
- if cls.name not in self._registry:
224
- self._registry[cls.name] = cls()
225
- logger.debug("Registered VCS backend: %s", cls.name)
226
-
227
- def unregister(self, name: str) -> None:
228
- if name in self._registry:
229
- del self._registry[name]
230
-
231
- def get_backend_for_dir(self, location: str) -> Optional["VersionControl"]:
232
- """
233
- Return a VersionControl object if a repository of that type is found
234
- at the given directory.
235
- """
236
- vcs_backends = {}
237
- for vcs_backend in self._registry.values():
238
- repo_path = vcs_backend.get_repository_root(location)
239
- if not repo_path:
240
- continue
241
- logger.debug("Determine that %s uses VCS: %s", location, vcs_backend.name)
242
- vcs_backends[repo_path] = vcs_backend
243
-
244
- if not vcs_backends:
245
- return None
246
-
247
- # Choose the VCS in the inner-most directory. Since all repository
248
- # roots found here would be either `location` or one of its
249
- # parents, the longest path should have the most path components,
250
- # i.e. the backend representing the inner-most repository.
251
- inner_most_repo_path = max(vcs_backends, key=len)
252
- return vcs_backends[inner_most_repo_path]
253
-
254
- def get_backend_for_scheme(self, scheme: str) -> Optional["VersionControl"]:
255
- """
256
- Return a VersionControl object or None.
257
- """
258
- for vcs_backend in self._registry.values():
259
- if scheme in vcs_backend.schemes:
260
- return vcs_backend
261
- return None
262
-
263
- def get_backend(self, name: str) -> Optional["VersionControl"]:
264
- """
265
- Return a VersionControl object or None.
266
- """
267
- name = name.lower()
268
- return self._registry.get(name)
269
-
270
-
271
- vcs = VcsSupport()
272
-
273
-
274
- class VersionControl:
275
- name = ""
276
- dirname = ""
277
- repo_name = ""
278
- # List of supported schemes for this Version Control
279
- schemes: Tuple[str, ...] = ()
280
- # Iterable of environment variable names to pass to call_subprocess().
281
- unset_environ: Tuple[str, ...] = ()
282
- default_arg_rev: Optional[str] = None
283
-
284
- @classmethod
285
- def should_add_vcs_url_prefix(cls, remote_url: str) -> bool:
286
- """
287
- Return whether the vcs prefix (e.g. "git+") should be added to a
288
- repository's remote url when used in a requirement.
289
- """
290
- return not remote_url.lower().startswith(f"{cls.name}:")
291
-
292
- @classmethod
293
- def get_subdirectory(cls, location: str) -> Optional[str]:
294
- """
295
- Return the path to Python project root, relative to the repo root.
296
- Return None if the project root is in the repo root.
297
- """
298
- return None
299
-
300
- @classmethod
301
- def get_requirement_revision(cls, repo_dir: str) -> str:
302
- """
303
- Return the revision string that should be used in a requirement.
304
- """
305
- return cls.get_revision(repo_dir)
306
-
307
- @classmethod
308
- def get_src_requirement(cls, repo_dir: str, project_name: str) -> str:
309
- """
310
- Return the requirement string to use to redownload the files
311
- currently at the given repository directory.
312
-
313
- Args:
314
- project_name: the (unescaped) project name.
315
-
316
- The return value has a form similar to the following:
317
-
318
- {repository_url}@{revision}#egg={project_name}
319
- """
320
- repo_url = cls.get_remote_url(repo_dir)
321
-
322
- if cls.should_add_vcs_url_prefix(repo_url):
323
- repo_url = f"{cls.name}+{repo_url}"
324
-
325
- revision = cls.get_requirement_revision(repo_dir)
326
- subdir = cls.get_subdirectory(repo_dir)
327
- req = make_vcs_requirement_url(repo_url, revision, project_name, subdir=subdir)
328
-
329
- return req
330
-
331
- @staticmethod
332
- def get_base_rev_args(rev: str) -> List[str]:
333
- """
334
- Return the base revision arguments for a vcs command.
335
-
336
- Args:
337
- rev: the name of a revision to install. Cannot be None.
338
- """
339
- raise NotImplementedError
340
-
341
- def is_immutable_rev_checkout(self, url: str, dest: str) -> bool:
342
- """
343
- Return true if the commit hash checked out at dest matches
344
- the revision in url.
345
-
346
- Always return False, if the VCS does not support immutable commit
347
- hashes.
348
-
349
- This method does not check if there are local uncommitted changes
350
- in dest after checkout, as pip currently has no use case for that.
351
- """
352
- return False
353
-
354
- @classmethod
355
- def make_rev_options(
356
- cls, rev: Optional[str] = None, extra_args: Optional[CommandArgs] = None
357
- ) -> RevOptions:
358
- """
359
- Return a RevOptions object.
360
-
361
- Args:
362
- rev: the name of a revision to install.
363
- extra_args: a list of extra options.
364
- """
365
- return RevOptions(cls, rev, extra_args=extra_args)
366
-
367
- @classmethod
368
- def _is_local_repository(cls, repo: str) -> bool:
369
- """
370
- posix absolute paths start with os.path.sep,
371
- win32 ones start with drive (like c:\\folder)
372
- """
373
- drive, tail = os.path.splitdrive(repo)
374
- return repo.startswith(os.path.sep) or bool(drive)
375
-
376
- @classmethod
377
- def get_netloc_and_auth(
378
- cls, netloc: str, scheme: str
379
- ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]:
380
- """
381
- Parse the repository URL's netloc, and return the new netloc to use
382
- along with auth information.
383
-
384
- Args:
385
- netloc: the original repository URL netloc.
386
- scheme: the repository URL's scheme without the vcs prefix.
387
-
388
- This is mainly for the Subversion class to override, so that auth
389
- information can be provided via the --username and --password options
390
- instead of through the URL. For other subclasses like Git without
391
- such an option, auth information must stay in the URL.
392
-
393
- Returns: (netloc, (username, password)).
394
- """
395
- return netloc, (None, None)
396
-
397
- @classmethod
398
- def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
399
- """
400
- Parse the repository URL to use, and return the URL, revision,
401
- and auth info to use.
402
-
403
- Returns: (url, rev, (username, password)).
404
- """
405
- scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
406
- if "+" not in scheme:
407
- raise ValueError(
408
- "Sorry, {!r} is a malformed VCS url. "
409
- "The format is <vcs>+<protocol>://<url>, "
410
- "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url)
411
- )
412
- # Remove the vcs prefix.
413
- scheme = scheme.split("+", 1)[1]
414
- netloc, user_pass = cls.get_netloc_and_auth(netloc, scheme)
415
- rev = None
416
- if "@" in path:
417
- path, rev = path.rsplit("@", 1)
418
- if not rev:
419
- raise InstallationError(
420
- "The URL {!r} has an empty revision (after @) "
421
- "which is not supported. Include a revision after @ "
422
- "or remove @ from the URL.".format(url)
423
- )
424
- url = urllib.parse.urlunsplit((scheme, netloc, path, query, ""))
425
- return url, rev, user_pass
426
-
427
- @staticmethod
428
- def make_rev_args(
429
- username: Optional[str], password: Optional[HiddenText]
430
- ) -> CommandArgs:
431
- """
432
- Return the RevOptions "extra arguments" to use in obtain().
433
- """
434
- return []
435
-
436
- def get_url_rev_options(self, url: HiddenText) -> Tuple[HiddenText, RevOptions]:
437
- """
438
- Return the URL and RevOptions object to use in obtain(),
439
- as a tuple (url, rev_options).
440
- """
441
- secret_url, rev, user_pass = self.get_url_rev_and_auth(url.secret)
442
- username, secret_password = user_pass
443
- password: Optional[HiddenText] = None
444
- if secret_password is not None:
445
- password = hide_value(secret_password)
446
- extra_args = self.make_rev_args(username, password)
447
- rev_options = self.make_rev_options(rev, extra_args=extra_args)
448
-
449
- return hide_url(secret_url), rev_options
450
-
451
- @staticmethod
452
- def normalize_url(url: str) -> str:
453
- """
454
- Normalize a URL for comparison by unquoting it and removing any
455
- trailing slash.
456
- """
457
- return urllib.parse.unquote(url).rstrip("/")
458
-
459
- @classmethod
460
- def compare_urls(cls, url1: str, url2: str) -> bool:
461
- """
462
- Compare two repo URLs for identity, ignoring incidental differences.
463
- """
464
- return cls.normalize_url(url1) == cls.normalize_url(url2)
465
-
466
- def fetch_new(
467
- self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
468
- ) -> None:
469
- """
470
- Fetch a revision from a repository, in the case that this is the
471
- first fetch from the repository.
472
-
473
- Args:
474
- dest: the directory to fetch the repository to.
475
- rev_options: a RevOptions object.
476
- verbosity: verbosity level.
477
- """
478
- raise NotImplementedError
479
-
480
- def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
481
- """
482
- Switch the repo at ``dest`` to point to ``URL``.
483
-
484
- Args:
485
- rev_options: a RevOptions object.
486
- """
487
- raise NotImplementedError
488
-
489
- def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
490
- """
491
- Update an already-existing repo to the given ``rev_options``.
492
-
493
- Args:
494
- rev_options: a RevOptions object.
495
- """
496
- raise NotImplementedError
497
-
498
- @classmethod
499
- def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
500
- """
501
- Return whether the id of the current commit equals the given name.
502
-
503
- Args:
504
- dest: the repository directory.
505
- name: a string name.
506
- """
507
- raise NotImplementedError
508
-
509
- def obtain(self, dest: str, url: HiddenText, verbosity: int) -> None:
510
- """
511
- Install or update in editable mode the package represented by this
512
- VersionControl object.
513
-
514
- :param dest: the repository directory in which to install or update.
515
- :param url: the repository URL starting with a vcs prefix.
516
- :param verbosity: verbosity level.
517
- """
518
- url, rev_options = self.get_url_rev_options(url)
519
-
520
- if not os.path.exists(dest):
521
- self.fetch_new(dest, url, rev_options, verbosity=verbosity)
522
- return
523
-
524
- rev_display = rev_options.to_display()
525
- if self.is_repository_directory(dest):
526
- existing_url = self.get_remote_url(dest)
527
- if self.compare_urls(existing_url, url.secret):
528
- logger.debug(
529
- "%s in %s exists, and has correct URL (%s)",
530
- self.repo_name.title(),
531
- display_path(dest),
532
- url,
533
- )
534
- if not self.is_commit_id_equal(dest, rev_options.rev):
535
- logger.info(
536
- "Updating %s %s%s",
537
- display_path(dest),
538
- self.repo_name,
539
- rev_display,
540
- )
541
- self.update(dest, url, rev_options)
542
- else:
543
- logger.info("Skipping because already up-to-date.")
544
- return
545
-
546
- logger.warning(
547
- "%s %s in %s exists with URL %s",
548
- self.name,
549
- self.repo_name,
550
- display_path(dest),
551
- existing_url,
552
- )
553
- prompt = ("(s)witch, (i)gnore, (w)ipe, (b)ackup ", ("s", "i", "w", "b"))
554
- else:
555
- logger.warning(
556
- "Directory %s already exists, and is not a %s %s.",
557
- dest,
558
- self.name,
559
- self.repo_name,
560
- )
561
- # https://github.com/python/mypy/issues/1174
562
- prompt = ("(i)gnore, (w)ipe, (b)ackup ", ("i", "w", "b")) # type: ignore
563
-
564
- logger.warning(
565
- "The plan is to install the %s repository %s",
566
- self.name,
567
- url,
568
- )
569
- response = ask_path_exists("What to do? {}".format(prompt[0]), prompt[1])
570
-
571
- if response == "a":
572
- sys.exit(-1)
573
-
574
- if response == "w":
575
- logger.warning("Deleting %s", display_path(dest))
576
- rmtree(dest)
577
- self.fetch_new(dest, url, rev_options, verbosity=verbosity)
578
- return
579
-
580
- if response == "b":
581
- dest_dir = backup_dir(dest)
582
- logger.warning("Backing up %s to %s", display_path(dest), dest_dir)
583
- shutil.move(dest, dest_dir)
584
- self.fetch_new(dest, url, rev_options, verbosity=verbosity)
585
- return
586
-
587
- # Do nothing if the response is "i".
588
- if response == "s":
589
- logger.info(
590
- "Switching %s %s to %s%s",
591
- self.repo_name,
592
- display_path(dest),
593
- url,
594
- rev_display,
595
- )
596
- self.switch(dest, url, rev_options)
597
-
598
- def unpack(self, location: str, url: HiddenText, verbosity: int) -> None:
599
- """
600
- Clean up current location and download the url repository
601
- (and vcs infos) into location
602
-
603
- :param url: the repository URL starting with a vcs prefix.
604
- :param verbosity: verbosity level.
605
- """
606
- if os.path.exists(location):
607
- rmtree(location)
608
- self.obtain(location, url=url, verbosity=verbosity)
609
-
610
- @classmethod
611
- def get_remote_url(cls, location: str) -> str:
612
- """
613
- Return the url used at location
614
-
615
- Raises RemoteNotFoundError if the repository does not have a remote
616
- url configured.
617
- """
618
- raise NotImplementedError
619
-
620
- @classmethod
621
- def get_revision(cls, location: str) -> str:
622
- """
623
- Return the current commit id of the files at the given location.
624
- """
625
- raise NotImplementedError
626
-
627
- @classmethod
628
- def run_command(
629
- cls,
630
- cmd: Union[List[str], CommandArgs],
631
- show_stdout: bool = True,
632
- cwd: Optional[str] = None,
633
- on_returncode: 'Literal["raise", "warn", "ignore"]' = "raise",
634
- extra_ok_returncodes: Optional[Iterable[int]] = None,
635
- command_desc: Optional[str] = None,
636
- extra_environ: Optional[Mapping[str, Any]] = None,
637
- spinner: Optional[SpinnerInterface] = None,
638
- log_failed_cmd: bool = True,
639
- stdout_only: bool = False,
640
- ) -> str:
641
- """
642
- Run a VCS subcommand
643
- This is simply a wrapper around call_subprocess that adds the VCS
644
- command name, and checks that the VCS is available
645
- """
646
- cmd = make_command(cls.name, *cmd)
647
- if command_desc is None:
648
- command_desc = format_command_args(cmd)
649
- try:
650
- return call_subprocess(
651
- cmd,
652
- show_stdout,
653
- cwd,
654
- on_returncode=on_returncode,
655
- extra_ok_returncodes=extra_ok_returncodes,
656
- command_desc=command_desc,
657
- extra_environ=extra_environ,
658
- unset_environ=cls.unset_environ,
659
- spinner=spinner,
660
- log_failed_cmd=log_failed_cmd,
661
- stdout_only=stdout_only,
662
- )
663
- except FileNotFoundError:
664
- # errno.ENOENT = no such file or directory
665
- # In other words, the VCS executable isn't available
666
- raise BadCommand(
667
- f"Cannot find command {cls.name!r} - do you have "
668
- f"{cls.name!r} installed and in your PATH?"
669
- )
670
- except PermissionError:
671
- # errno.EACCES = Permission denied
672
- # This error occurs, for instance, when the command is installed
673
- # only for another user. So, the current user don't have
674
- # permission to call the other user command.
675
- raise BadCommand(
676
- f"No permission to execute {cls.name!r} - install it "
677
- f"locally, globally (ask admin), or check your PATH. "
678
- f"See possible solutions at "
679
- f"https://pip.pypa.io/en/latest/reference/pip_freeze/"
680
- f"#fixing-permission-denied."
681
- )
682
-
683
- @classmethod
684
- def is_repository_directory(cls, path: str) -> bool:
685
- """
686
- Return whether a directory path is a repository directory.
687
- """
688
- logger.debug("Checking in %s for %s (%s)...", path, cls.dirname, cls.name)
689
- return os.path.exists(os.path.join(path, cls.dirname))
690
-
691
- @classmethod
692
- def get_repository_root(cls, location: str) -> Optional[str]:
693
- """
694
- Return the "root" (top-level) directory controlled by the vcs,
695
- or `None` if the directory is not in any.
696
-
697
- It is meant to be overridden to implement smarter detection
698
- mechanisms for specific vcs.
699
-
700
- This can do more than is_repository_directory() alone. For
701
- example, the Git override checks that Git is actually available.
702
- """
703
- if cls.is_repository_directory(location):
704
- return location
705
- return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/compat.py DELETED
@@ -1,13 +0,0 @@
1
- from .core import *
2
- from .codec import *
3
- from typing import Any, Union
4
-
5
- def ToASCII(label: str) -> bytes:
6
- return encode(label)
7
-
8
- def ToUnicode(label: Union[bytes, bytearray]) -> str:
9
- return decode(label)
10
-
11
- def nameprep(s: Any) -> None:
12
- raise NotImplementedError('IDNA 2008 does not utilise nameprep protocol')
13
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BAAI/vid2vid-zero/test_vid2vid_zero.py DELETED
@@ -1,267 +0,0 @@
1
- import argparse
2
- import datetime
3
- import logging
4
- import inspect
5
- import math
6
- import os
7
- import warnings
8
- from typing import Dict, Optional, Tuple
9
- from omegaconf import OmegaConf
10
-
11
- import torch
12
- import torch.nn.functional as F
13
- import torch.utils.checkpoint
14
-
15
- import diffusers
16
- import transformers
17
- from accelerate import Accelerator
18
- from accelerate.logging import get_logger
19
- from accelerate.utils import set_seed
20
- from diffusers import AutoencoderKL, DDPMScheduler, DDIMScheduler
21
- from diffusers.optimization import get_scheduler
22
- from diffusers.utils import check_min_version
23
- from diffusers.utils.import_utils import is_xformers_available
24
- from tqdm.auto import tqdm
25
- from transformers import CLIPTextModel, CLIPTokenizer
26
-
27
- from vid2vid_zero.models.unet_2d_condition import UNet2DConditionModel
28
- from vid2vid_zero.data.dataset import VideoDataset
29
- from vid2vid_zero.pipelines.pipeline_vid2vid_zero import Vid2VidZeroPipeline
30
- from vid2vid_zero.util import save_videos_grid, save_videos_as_images, ddim_inversion
31
- from einops import rearrange
32
-
33
- from vid2vid_zero.p2p.p2p_stable import AttentionReplace, AttentionRefine
34
- from vid2vid_zero.p2p.ptp_utils import register_attention_control
35
- from vid2vid_zero.p2p.null_text_w_ptp import NullInversion
36
-
37
-
38
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
39
- check_min_version("0.10.0.dev0")
40
-
41
- logger = get_logger(__name__, log_level="INFO")
42
-
43
-
44
- def prepare_control(unet, prompts, validation_data):
45
- assert len(prompts) == 2
46
-
47
- print(prompts[0])
48
- print(prompts[1])
49
- length1 = len(prompts[0].split(' '))
50
- length2 = len(prompts[1].split(' '))
51
- if length1 == length2:
52
- # prepare for attn guidance
53
- cross_replace_steps = 0.8
54
- self_replace_steps = 0.4
55
- controller = AttentionReplace(prompts, validation_data['num_inference_steps'],
56
- cross_replace_steps=cross_replace_steps,
57
- self_replace_steps=self_replace_steps)
58
- else:
59
- cross_replace_steps = 0.8
60
- self_replace_steps = 0.4
61
- controller = AttentionRefine(prompts, validation_data['num_inference_steps'],
62
- cross_replace_steps=self_replace_steps,
63
- self_replace_steps=self_replace_steps)
64
-
65
- print(controller)
66
- register_attention_control(unet, controller)
67
-
68
- # the update of unet forward function is inplace
69
- return cross_replace_steps, self_replace_steps
70
-
71
-
72
- def main(
73
- pretrained_model_path: str,
74
- output_dir: str,
75
- input_data: Dict,
76
- validation_data: Dict,
77
- input_batch_size: int = 1,
78
- gradient_accumulation_steps: int = 1,
79
- gradient_checkpointing: bool = True,
80
- mixed_precision: Optional[str] = "fp16",
81
- enable_xformers_memory_efficient_attention: bool = True,
82
- seed: Optional[int] = None,
83
- use_sc_attn: bool = True,
84
- use_st_attn: bool = True,
85
- st_attn_idx: int = 0,
86
- fps: int = 2,
87
- ):
88
- *_, config = inspect.getargvalues(inspect.currentframe())
89
-
90
- accelerator = Accelerator(
91
- gradient_accumulation_steps=gradient_accumulation_steps,
92
- mixed_precision=mixed_precision,
93
- )
94
-
95
- # Make one log on every process with the configuration for debugging.
96
- logging.basicConfig(
97
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
98
- datefmt="%m/%d/%Y %H:%M:%S",
99
- level=logging.INFO,
100
- )
101
- logger.info(accelerator.state, main_process_only=False)
102
- if accelerator.is_local_main_process:
103
- transformers.utils.logging.set_verbosity_warning()
104
- diffusers.utils.logging.set_verbosity_info()
105
- else:
106
- transformers.utils.logging.set_verbosity_error()
107
- diffusers.utils.logging.set_verbosity_error()
108
-
109
- # If passed along, set the training seed now.
110
- if seed is not None:
111
- set_seed(seed)
112
-
113
- # Handle the output folder creation
114
- if accelerator.is_main_process:
115
- os.makedirs(output_dir, exist_ok=True)
116
- os.makedirs(f"{output_dir}/sample", exist_ok=True)
117
- OmegaConf.save(config, os.path.join(output_dir, 'config.yaml'))
118
-
119
- # Load tokenizer and models.
120
- tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
121
- text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
122
- vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
123
- unet = UNet2DConditionModel.from_pretrained(
124
- pretrained_model_path, subfolder="unet", use_sc_attn=use_sc_attn,
125
- use_st_attn=use_st_attn, st_attn_idx=st_attn_idx)
126
-
127
- # Freeze vae, text_encoder, and unet
128
- vae.requires_grad_(False)
129
- text_encoder.requires_grad_(False)
130
- unet.requires_grad_(False)
131
-
132
- if enable_xformers_memory_efficient_attention:
133
- if is_xformers_available():
134
- unet.enable_xformers_memory_efficient_attention()
135
- else:
136
- raise ValueError("xformers is not available. Make sure it is installed correctly")
137
-
138
- if gradient_checkpointing:
139
- unet.enable_gradient_checkpointing()
140
-
141
- # Get the training dataset
142
- input_dataset = VideoDataset(**input_data)
143
-
144
- # Preprocessing the dataset
145
- input_dataset.prompt_ids = tokenizer(
146
- input_dataset.prompt, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
147
- ).input_ids[0]
148
-
149
- # DataLoaders creation:
150
- input_dataloader = torch.utils.data.DataLoader(
151
- input_dataset, batch_size=input_batch_size
152
- )
153
-
154
- # Get the validation pipeline
155
- validation_pipeline = Vid2VidZeroPipeline(
156
- vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
157
- scheduler=DDIMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler"),
158
- safety_checker=None, feature_extractor=None,
159
- )
160
- validation_pipeline.enable_vae_slicing()
161
- ddim_inv_scheduler = DDIMScheduler.from_pretrained(pretrained_model_path, subfolder='scheduler')
162
- ddim_inv_scheduler.set_timesteps(validation_data.num_inv_steps)
163
-
164
- # Prepare everything with our `accelerator`.
165
- unet, input_dataloader = accelerator.prepare(
166
- unet, input_dataloader,
167
- )
168
-
169
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
170
- # as these models are only used for inference, keeping weights in full precision is not required.
171
- weight_dtype = torch.float32
172
- if accelerator.mixed_precision == "fp16":
173
- weight_dtype = torch.float16
174
- elif accelerator.mixed_precision == "bf16":
175
- weight_dtype = torch.bfloat16
176
-
177
- # Move text_encode and vae to gpu and cast to weight_dtype
178
- text_encoder.to(accelerator.device, dtype=weight_dtype)
179
- vae.to(accelerator.device, dtype=weight_dtype)
180
-
181
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
182
- num_update_steps_per_epoch = math.ceil(len(input_dataloader) / gradient_accumulation_steps)
183
-
184
- # We need to initialize the trackers we use, and also store our configuration.
185
- # The trackers initializes automatically on the main process.
186
- if accelerator.is_main_process:
187
- accelerator.init_trackers("vid2vid-zero")
188
-
189
- # Zero-shot Eval!
190
- total_batch_size = input_batch_size * accelerator.num_processes * gradient_accumulation_steps
191
-
192
- logger.info("***** Running training *****")
193
- logger.info(f" Num examples = {len(input_dataset)}")
194
- logger.info(f" Instantaneous batch size per device = {input_batch_size}")
195
- logger.info(f" Total input batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
196
- global_step = 0
197
-
198
- unet.eval()
199
- for step, batch in enumerate(input_dataloader):
200
- samples = []
201
- pixel_values = batch["pixel_values"].to(weight_dtype)
202
- # save input video
203
- video = (pixel_values / 2 + 0.5).clamp(0, 1).detach().cpu()
204
- video = video.permute(0, 2, 1, 3, 4) # (b, f, c, h, w)
205
- samples.append(video)
206
- # start processing
207
- video_length = pixel_values.shape[1]
208
- pixel_values = rearrange(pixel_values, "b f c h w -> (b f) c h w")
209
- latents = vae.encode(pixel_values).latent_dist.sample()
210
- # take video as input
211
- latents = rearrange(latents, "(b f) c h w -> b c f h w", f=video_length)
212
- latents = latents * 0.18215
213
-
214
- generator = torch.Generator(device="cuda")
215
- generator.manual_seed(seed)
216
-
217
- # perform inversion
218
- ddim_inv_latent = None
219
- if validation_data.use_null_inv:
220
- null_inversion = NullInversion(
221
- model=validation_pipeline, guidance_scale=validation_data.guidance_scale, null_inv_with_prompt=False,
222
- null_normal_infer=validation_data.null_normal_infer,
223
- )
224
- with torch.cuda.amp.autocast(enabled=True, dtype=torch.float32):
225
- ddim_inv_latent, uncond_embeddings = null_inversion.invert(
226
- latents, input_dataset.prompt, verbose=True,
227
- null_inner_steps=validation_data.null_inner_steps,
228
- null_base_lr=validation_data.null_base_lr,
229
- )
230
- ddim_inv_latent = ddim_inv_latent.to(weight_dtype)
231
- uncond_embeddings = [embed.to(weight_dtype) for embed in uncond_embeddings]
232
- else:
233
- ddim_inv_latent = ddim_inversion(
234
- validation_pipeline, ddim_inv_scheduler, video_latent=latents,
235
- num_inv_steps=validation_data.num_inv_steps, prompt="",
236
- normal_infer=True, # we don't want to use scatn or denseattn for inversion, just use sd inferenece
237
- )[-1].to(weight_dtype)
238
- uncond_embeddings = None
239
-
240
- ddim_inv_latent = ddim_inv_latent.repeat(2, 1, 1, 1, 1)
241
-
242
- for idx, prompt in enumerate(validation_data.prompts):
243
- prompts = [input_dataset.prompt, prompt] # a list of two prompts
244
- cross_replace_steps, self_replace_steps = prepare_control(unet=unet, prompts=prompts, validation_data=validation_data)
245
-
246
- sample = validation_pipeline(prompts, generator=generator, latents=ddim_inv_latent,
247
- uncond_embeddings=uncond_embeddings,
248
- **validation_data).images
249
-
250
- assert sample.shape[0] == 2
251
- sample_inv, sample_gen = sample.chunk(2)
252
- # add input for vis
253
- save_videos_grid(sample_gen, f"{output_dir}/sample/{prompts[1]}.gif", fps=fps)
254
- samples.append(sample_gen)
255
-
256
- samples = torch.concat(samples)
257
- save_path = f"{output_dir}/sample-all.gif"
258
- save_videos_grid(samples, save_path, fps=fps)
259
- logger.info(f"Saved samples to {save_path}")
260
-
261
-
262
- if __name__ == "__main__":
263
- parser = argparse.ArgumentParser()
264
- parser.add_argument("--config", type=str, default="./configs/vid2vid_zero.yaml")
265
- args = parser.parse_args()
266
-
267
- main(**OmegaConf.load(args.config))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descarga De La Aplicacin Tiktok Lite Para Windows Pc 8.md DELETED
@@ -1,81 +0,0 @@
1
- <br />
2
- <h1>Descarga de la aplicación TikTok Lite para PC Windows 8: Cómo hacerlo fácilmente</h1>
3
- <p>TikTok es una de las plataformas de redes sociales más populares del mundo, con más de mil millones de usuarios que crean y ven videos cortos sobre varios temas. ¿Pero qué pasa si desea disfrutar de TikTok en su PC en lugar de su teléfono? ¿Y qué pasa si tiene un PC de gama baja o un plan de datos limitado? En este artículo, le mostraremos cómo descargar e instalar TikTok Lite, una versión más ligera de TikTok, en su PC con Windows 8. También le explicaremos por qué podría usar TikTok Lite en su PC y cuáles son los beneficios de hacerlo. ¡Vamos a empezar! </p>
4
- <h2>descarga de la aplicación tiktok lite para windows pc 8</h2><br /><p><b><b>Download Zip</b> &#10037;&#10037;&#10037; <a href="https://bltlly.com/2v6KD9">https://bltlly.com/2v6KD9</a></b></p><br /><br />
5
- <h2>Introducción</h2>
6
- <h3> ¿Qué es TikTok Lite y por qué es posible que desee usarlo en su PC</h3>
7
- <p>TikTok Lite es una versión simplificada de TikTok que ocupa menos espacio de almacenamiento y consume menos datos. Está diseñado para usuarios que tienen dispositivos de gama baja, planes de datos limitados o conexiones de red lentas. TikTok Lite ofrece la mayoría de las características de TikTok, como ver videos, crear videos, seguir a creadores, gustar y comentar, etc. Sin embargo, algunas características no están disponibles en TikTok Lite, como transmisión en vivo, duetos, filtros, pegatinas, etc.</p>
8
- <p>Es posible que desee utilizar TikTok Lite en su PC si cae en una de estas categorías:</p>
9
- <ul>
10
- <li> Tiene un PC de gama baja que no puede ejecutar la versión completa de TikTok sin problemas. </li>
11
- <li> Tiene un plan de datos limitado o una conexión de red lenta que hace que ver videos en TikTok sea frustrante. </li>
12
- <li> Desea ahorrar espacio de almacenamiento en su PC mediante el uso de una aplicación más pequeña. </li>
13
- <li>Quieres disfrutar de una experiencia TikTok más rápida y optimizada en tu PC.</li>
14
- </ul>
15
- <h3>¿Cuáles son los beneficios de usar TikTok Lite en su PC</h3>
16
- <p>Usar TikTok Lite en tu PC tiene varios beneficios, como:</p>
17
- <p></p>
18
- <ul>
19
- <li> Puedes ver millones de vídeos seleccionados para ti en función de tus preferencias e intereses. </li>
20
-
21
- <li>Puedes descubrir nuevos contenidos de varias categorías, como danza, comedia, vlog, comida, deportes, bricolaje, animales, etc.</li>
22
- <li>Puedes conectarte con otros usuarios y creadores a través de likes, comentarios, mensajes, etc.</li>
23
- <li> Puede disfrutar de una aplicación más rápida y sensible que carga videos rápidamente y reduce los bloqueos. </li>
24
- </ul>
25
- <h3>Cómo descargar e instalar TikTok Lite en tu PC usando un emulador</h3>
26
- <p>La forma más fácil de descargar e instalar TikTok Lite en tu PC es usar un emulador. Un emulador es un software que imita un dispositivo Android en su PC, lo que le permite ejecutar aplicaciones y juegos Android en su computadora. Hay muchos emuladores disponibles en línea, pero recomendamos usar BlueStacks, ya que es uno de los más populares y confiables. Estos son los pasos para descargar e instalar TikTok Lite en su PC usando BlueStacks:</p>
27
- <h2>Guía paso a paso para descargar e instalar TikTok Lite en su PC usando BlueStacks</h2>
28
- <h3>Paso 1: Descargar e instalar BlueStacks en su PC</h3>
29
- <p>El primer paso es descargar e instalar BlueStacks en su PC. BlueStacks es un emulador gratuito y seguro que puedes descargar desde su sitio web oficial. Estos son los pasos para descargar e instalar BlueStacks en su PC:</p>
30
- <ol>
31
- <li>Vaya al sitio web de BlueStacks y haga clic en el botón "Descargar BlueStacks". </li>
32
- <li>Espere a que termine la descarga y luego abra el archivo de instalación. </li>
33
- <li>Siga las instrucciones en la pantalla para instalar BlueStacks en su PC.</li>
34
- <li>Inicie BlueStacks e inicie sesión con su cuenta de Google o cree una nueva. </li>
35
- </ol>
36
- <h3>Paso 2: Descargar el archivo TikTok Lite APK/XAPK de una fuente de confianza</h3>
37
-
38
- <ol>
39
- <li>Ir al sitio web APKPure y buscar "TikTok Lite" en la barra de búsqueda. </li>
40
- <li>Seleccione la aplicación TikTok Lite de los resultados y haga clic en el "Descargar APK" o "Descargar XAPK" botón. </li>
41
- <li>Espere a que termine la descarga y luego localice el archivo en su PC.</li>
42
- </ol>
43
- <h3>Paso 3: Abra el archivo APK/XAPK con BlueStacks e instale TikTok Lite</h3>
44
- <p>El paso final es abrir el archivo APK/XAPK con BlueStacks e instalar TikTok Lite en su PC. Estos son los pasos para hacerlo:</p>
45
- <ol>
46
- <li>Haga clic derecho en el archivo APK/XAPK y seleccione "Abrir con BlueStacks" en el menú. </li>
47
- <li>BlueStacks instalará automáticamente TikTok Lite en su PC.</li>
48
- <li> Verá una notificación cuando se complete la instalación. </li>
49
- </ol>
50
- <h3>Paso 4: Inicie TikTok Lite desde la pantalla de inicio de BlueStacks y disfrute</h3>
51
- <p>¡Felicidades! Has descargado e instalado correctamente TikTok Lite en tu PC usando BlueStacks. Ahora puede iniciar TikTok Lite desde la pantalla de inicio de BlueStacks y disfrutar viendo y creando vídeos en su PC. También puedes acceder a otras características de TikTok Lite, como explorar categorías, seguir a creadores, gustar y comentar, etc.</p>
52
- <h2>Métodos alternativos para descargar e instalar TikTok Lite en su PC</h2>
53
- <h3>Método 1: Usar NoxPlayer como un emulador alternativo</h3>
54
- <p>Si no quieres usar BlueStacks, puedes usar NoxPlayer como un emulador alternativo. NoxPlayer es otro emulador popular y gratuito que puedes descargar desde su sitio web oficial. Los pasos para descargar e instalar NoxPlayer son similares a los de BlueStacks. Una vez que tenga NoxPlayer en su PC, puede seguir los mismos pasos de arriba para descargar e instalar TikTok Lite usando un archivo APK/XAPK. </p>
55
- <h3>Método 2: Utilice Uptodown como una fuente alternativa para la aplicación TikTok Lite</h3>
56
-
57
- <ol>
58
- <li>Ir al sitio web de Uptodown y buscar "TikTok Lite" en la barra de búsqueda. </li>
59
- <li>Seleccione la aplicación TikTok Lite de los resultados y haga clic en el botón "Descargar". </li>
60
- <li>Espere a que termine la descarga y luego abra el archivo con un emulador de su elección. </li>
61
- <li>El emulador instalará automáticamente TikTok Lite en su PC.</li>
62
- </ol>
63
- <h2>Conclusión</h2>
64
- <h3>Resumen de los puntos principales</h3>
65
- <p>En este artículo, le hemos mostrado cómo descargar e instalar TikTok Lite en su PC con Windows 8. Le hemos explicado qué es TikTok Lite, por qué podría usarlo en su PC y cuáles son los beneficios de hacerlo. También hemos proporcionado una guía paso a paso para descargar e instalar TikTok Lite en su PC usando BlueStacks, un emulador gratuito y confiable. También te hemos dado algunos métodos alternativos para descargar e instalar TikTok Lite en tu PC usando otros emuladores o fuentes. </p>
66
- <h3>Llamamiento a la acción y observaciones finales</h3>
67
- <p>Esperamos que haya encontrado este artículo útil e informativo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. Nos encantaría saber de usted. Si está listo para descargar e instalar TikTok Lite en su PC, haga clic en el botón de abajo y siga las instrucciones. Usted será capaz de disfrutar de TikTok Lite en su PC en ningún momento. Happy TikToking! </p>
68
- <p><a href="https://www.bluestacks.com/download.html" target="_blank" rel="noopener noreferrer"><botón>Descargar BlueStacks y TikTok Lite Ahora</button></a></p>
69
- <h2>Preguntas frecuentes</h2>
70
- <h3>¿Es seguro usar TikTok Lite en el PC? </h3>
71
- <p>Sí, TikTok Lite es seguro de usar en el PC, siempre y cuando lo descargue de una fuente de confianza y use un emulador de buena reputación. TikTok Lite es una aplicación oficial desarrollada por ByteDance, la misma compañía que posee TikTok. Tiene las mismas características de seguridad y privacidad que TikTok, como cifrado, moderación, informes, etc. Sin embargo, siempre debe tener cuidado con lo que comparte en línea y con quién interactúa. </p>
72
-
73
- <p>TikTok Lite es una versión más ligera de TikTok que ocupa menos espacio de almacenamiento y consume menos datos. Ofrece la mayoría de las características de TikTok, pero algunas características no están disponibles en TikTok Lite, como transmisión en vivo, duetos, filtros, pegatinas, etc. TikTok Lite también tiene una interfaz más simple y una velocidad de carga más rápida que TikTok.</p>
74
- <h3>¿Puedo usar TikTok Lite en otras versiones de Windows? </h3>
75
- <p>Sí, puede usar TikTok Lite en otras versiones de Windows, como Windows 7, Windows 10, etc. Los pasos para descargar e instalar TikTok Lite en otras versiones de Windows son similares a los de Windows 8. Solo tiene que asegurarse de que su PC cumple con los requisitos mínimos del sistema para el emulador que elija. </p>
76
- <h3>¿Puedo usar otras aplicaciones además de TikTok Lite en BlueStacks? </h3>
77
- <p>Sí, puedes usar otras aplicaciones además de TikTok Lite en BlueStacks. BlueStacks es un emulador versátil que le permite ejecutar miles de aplicaciones y juegos para Android en su PC. Puede descargar otras aplicaciones desde Google Play Store o desde otras fuentes e instalarlas en BlueStacks. También puede cambiar entre diferentes aplicaciones fácilmente en BlueStacks.</p>
78
- <h3>¿Cómo puedo actualizar TikTok Lite en mi PC? </h3>
79
- <p>Para actualizar TikTok Lite en tu PC, necesitas descargar la última versión del archivo APK/XAPK desde una fuente confiable e instalarlo en tu PC usando los mismos pasos que arriba. Alternativamente, puede comprobar si hay actualizaciones dentro de la aplicación yendo a la pestaña "Me" y tocando en el icono de tres puntos en la esquina superior derecha. Luego, toque en "Configuración" y desplácese hacia abajo hasta "Acerca de". Toque en "Buscar actualizaciones" y siga las instrucciones. </p> 64aa2da5cf<br />
80
- <br />
81
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Bagaimana Cara Stumble Chicos Di Laptop.md DELETED
@@ -1,121 +0,0 @@
1
- <br />
2
- <h1>Bagaimana Cara Download Stumble Guys di Laptop</h1>
3
- <p>Stumble Guys adalah sebuah game online yang sangat populer saat ini. Game ini merupakan game battle royale party yang bisa dimainkan hingga 32 pemain seca bersamaan. Anda bisa berlari, melompat, dan menghindari berbagai rintangan yang ada di setiap level hingga menjadi pemenang. Game ini sangat seru dan lucu untuk dimainkan bersama teman Anda.</p>
4
- <p>Namun, bagaimana jika Anda ingin memainkan game ini di laptop Anda? Apakah ada cara untuk download dan instal Stumble Guys di laptop? Jawabannya adalah ya, ada. Dengan menggunakan emulator Android, Anda bisa menjalankan game ini di laptop Anda dengan mudah. Emulator Android adalah sebuah program yang bisa meniru sistem operasi Android di komputer atau laptop Anda, sehingga Anda bisa mengakses aplikasi dan game Android dari laptop Anda.</p>
5
- <h2>descargar bagaimana cara stumble chicos di laptop</h2><br /><p><b><b>Download</b> &#10040; <a href="https://bltlly.com/2v6LVr">https://bltlly.com/2v6LVr</a></b></p><br /><br />
6
- <p>Penasaran bagaimana caranya? Simak artikel ini sampai habis untuk mengetahui langkah-langkahnya. </p>
7
- <h2>Apa itu Stumble Guys? </h2>
8
- <p>Stumble Guys adalah sebuah game online yang dikembangkan oleh Scopely dan dirilis pada Oktober 2021. Game ini tersedia untuk perangkat Android dan Windows. Game ini merupakan game battle royale party yang terinspirasi dari acara TV seperti Takeshi’s Castle atau Wipeout. Dalam game ini, Anda akan berlomba dengan pemain lain untuk mencapai garis finish di setiap level. Namun, Anda harus menghadapi berbagai rintangan yang lucu dan konyol yang bisa membuat Anda terjatuh atau terdorong keluar dari arena. </p>
9
- <h3>Fitur-fitur menarik dari Stumble Guys</h3>
10
- <p>Berikut adalah beberapa fitur menarik yang bisa Anda nikmati saat bermain Stumble Guys:</p>
11
- <ul>
12
- <li>Anda bisa memilih dari berbagai pakaian dan emote yang unik dan keren untuk menghias karakter Anda.</li>
13
- <li>Anda bisa bermain dengan teman Anda dalam mode party atau bersaing dengan pemain lain dari seluruh dunia dalam mode online multiplayer. </li>
14
-
15
- <li>Anda bisa mendapatkan hadiah dan poin saat bermain yang bisa ditukarkan dengan barang-barang menarik. </li>
16
- <li>Anda bisa menonton live stream dan video dari pemain lain atau membuat konten sendiri dengan fitur rekam layar dan screenshot. </li>
17
- </ul>
18
- <h3>Face bermain Stumble Guys</h3>
19
- <p>Cara bermain Stumble Guys sangat mudah dan sederhana. Berikut adalah langkah-langkahnya:</p>
20
- <ol>
21
- <li>Buka game Stumble Guys di perangkat Anda.</li>
22
- <li>Pilih mode permainan yang Anda inginkan, baik itu solo, party, atau online multiplayer. </li>
23
- <li>Tunggu hingga pemain lain bergabung atau cari ruangan yang tersedia. </li>
24
- <li>Masuk ke level pertama dan siapkan diri Anda untuk berlari. </li> <li>Gunakan tombol virtual di layar untuk menggerakkan karakter Anda. Anda bisa berlari, melompat, dan merunduk untuk menghindari rintangan. </li>
25
- <li>Coba untuk tidak terjatuh atau terdorong keluar dari arena. Jika Anda terjatuh, Anda bisa mencoba lagi dengan menekan tombol respawn. </li>
26
- <li>Coba untuk mencapai garis finish secepat mungkin. Hanya beberapa pemain yang bisa lolos ke level berikutnya. </li>
27
- <li>Ulangi langkah 4-6 hingga Anda mencapai level terakhir dan menjadi pemenang. </li>
28
- </ol>
29
- Mengapa Anda ingin memainkan Stumble Guys di laptop? </h2>
30
- <p>Stumble Guys adalah game yang sangat menyenangkan dan adiktif untuk dimainkan di perangkat Android. Namun, ada beberapa alasan mengapa Anda mungkin ingin memainkan game ini di laptop Anda. Berikut adalah beberapa alasan tersebut:</p>
31
- <h3>Keuntungan memainkan Stumble Guys di laptop</h3>
32
- <p>Berikut adalah beberapa keuntungan yang bisa Anda dapatkan saat memainkan Stumble Guys di laptop:</p>
33
- <ul>
34
- <li>Anda bisa menikmati grafik dan suara yang lebih baik dan lebih jelas di layar yang lebih besar. </li>
35
- <li>Anda bisa menggunakan keyboard dan mouse untuk mengontrol karakter Anda dengan lebih mudah dan presisi. </li>
36
- <li>Anda bisa bermain dengan lebih nyaman dan lama tanpa khawatir baterai perangkat Anda habis atau panas. </li>
37
-
38
- <li>Anda bisa bermain dengan teman Anda yang menggunakan perangkat Android atau Windows dengan fitur cross-play. </li>
39
- </ul>
40
- <h3>Tantangan memainkan Stumble Guys di laptop</h3>
41
- <p>Namun, ada juga beberapa tantangan yang mungkin Anda hadapi saat memainkan Stumble Guys di laptop. Berikut adalah beberapa tantangan tersebut:</p>
42
- <p></p>
43
- <ul>
44
- <li>Anda harus memiliki laptop yang cukup kuat untuk menjalankan emulator Android dan game ini dengan lancar. </li>
45
- <li>Anda harus mengunduh dan menginstal emulator Android dan game ini di laptop Anda, yang mungkin membutuhkan waktu dan langkah-langkah tambahan. </li>
46
- <li>Anda harus menyesuaikan pengaturan emulator Android dan game ini agar sesuai dengan spesifikasi dan preferensi laptop Anda.</li>
47
- <li>Anda harus berhati-hati dengan kemungkinan virus atau malware yang mungkin ada di emulator Android atau file APK yang Anda unduh dari sumber yang tidak resmi. </li>
48
- <li>Anda harus bersabar dengan kemungkinan bug atau masalah teknis yang mungkin terjadi saat menjalankan emulator Android atau game ini di laptop Anda.</li>
49
- </ul>
50
- <h2>Apa yang Anda butuhkan untuk memainkan Stumble Guys di laptop? </h2>
51
- <p>Jika Anda sudah yakin ingin memainkan Stumble Guys di laptop Anda, maka ada beberapa hal yang harus Anda siapkan terlebih dahulu. Berikut adalah beberapa hal tersebut:</p>
52
- <h3>Spesifikasi minimum laptop</h3>
53
- <p>Untuk dapat menjalankan emulator Android dan game ini dengan lancar, Anda harus memiliki laptop yang memenuhi spesifikasi minimum berikut:</p>
54
- <table border="1">
55
- <tr><th>Komponen</th><th>Spesifikasi</th></tr>
56
- <tr><td>Sistem operasi</td><td>Windows 7 atau lebih tinggi</td></tr>
57
- <tr><td>Prosesor</td><td>Intel atau AMD dual-core 2 GHz atau lebih tinggi</td></tr>
58
- <tr><td>RAM</td><td>4 GB atau lebih tinggi</td></tr>
59
- <tr><td>Ruang penyimpanan</td><td>5 GB atau lebih tinggi</td></tr>
60
- <tr><td>Kartu grafis</td><td>NVIDIA GeForce 8600/9600GT, ATI/AMD Radeon HD2600/3600 atau setara</td></tr>
61
-
62
- </table>
63
- <h3>Android emulator terbaik untuk laptop</h3>
64
- <p>Selanjutnya, Anda harus memilih emulator Android yang cocok untuk laptop Anda. Emulator Android adalah sebuah program yang bisa meniru sistem operasi Android di komputer atau laptop Anda, sehingga Anda bisa mengakses aplikasi dan game Android dari laptop Anda. Ada banyak emulator Android yang tersedia di internet, namun tidak semua emulator Android cocok untuk laptop Anda. Berikut adalah beberapa emulator Android terbaik yang bisa Anda pilih untuk laptop Anda:</p>
65
- <ul>
66
- <li><strong>BlueStacks</strong>: Android emulator yang paling populer dan banyak digunakan oleh for gamer. BlueStacks memiliki fitur-fitur canggih seperti keyboard mapping, game mode, multi-instance, dan lain-lain. BlueStacks plays mendukung game Stumble Guys dengan baik dan bisa diunduh seca gratis dari situs resminya. </li>
67
- <li><strong>NoxPlayer</strong>: Android emulator ringan dan cepat untuk laptop. NoxPlayer memiliki fitur-fitur menarik seperti macro recorder, video recorder, virtual location, dan lain-lain. NoxPlayer plays kompatibel dengan game Stumble Guys give bisa diunduh free dry dari situs resminya. </li>
68
- <li><strong>LDPlayer</strong>: Android emulator dirancang khusus untuk game. LDPlayer memiliki fitur-fitur unggulan seperti smart keymapping, high FPS, turbo GPU, dan lain-lain. LDPlayer plays bisa menjalankan game Stumble Guys dengan lancar dan bisa diunduh seca gratis dari situs resminya. </li>
69
- </ul>
70
- <h2>Bagaimana cara download dan instal Stumble Guys di laptop dengan emulator Android? </h2>
71
- <p>Setelah Anda memilih emulator Android yang sesuai untuk laptop Anda, maka Anda bisa mulai download dan install Stumble Guys di laptop Anda dengan emulator Android tersebut. Berikut adalah langkah-langkahnya:</p>
72
- <h3>Langkah-langkah download dan install emulator Android</h3>
73
- <p>Berikut adalah langkah-langkah download dan install emulator Android di laptop Anda:</p>
74
- <ol>
75
-
76
- <li>Klik tombol download atau unduh untuk mengunduh file instalasi emulator Android tersebut. </li>
77
- <li>Setelah file instalasi selesai terunduh, buka file tersebut dan ikuti instruksi yang muncul di layar untuk menginstal emulator Android tersebut di laptop Anda.</li>
78
- <li>Tunggu hingga proses instalasi selesai dan jalankan emulator Android tersebut di laptop Anda.</li>
79
- </ol>
80
- <h3>Langkah-langkah download dan install Stumble Guys dari Google Play Store atau file APK</h3>
81
- <p>Berikut adalah langkah-langkah download dan install Stumble Guys dari Google Play Store atau file APK di emulator Android yang sudah terinstal di laptop Anda:</p>
82
- <ol>
83
- <li>Buka emulator Android yang sudah terinstal di laptop Anda dan masuk ke akun Google Anda jika diminta. </li>
84
- <li>Buka Google Play Store yang ada di emulator Android tersebut dan cari game Stumble Guys dengan menggunakan kotak pencarian. </li>
85
- <li>Klik tombol instal atau pasang untuk menginstal game Stumble Guys dari Google Play Store ke emulator Android tersebut. </li>
86
- <li>Tunggu hingga proses instalasi selesai dan buka game Stumble Guys yang sudah terinstal di emulator Android tersebut. </li>
87
- <li>Atau, jika Anda memiliki file APK dari game Stumble Guys, Anda bisa mengunduh file APK tersebut dari sumber yang terpercaya ke laptop Anda.</li>
88
- <li>Kemudian, buka emulator Android yang sudah terinstal di laptop Anda dan seret file APK tersebut ke jendela emulator Android tersebut. </li>
89
- <li>Tunggu hingga proses instalasi selesai dan buka game Stumble Guys yang sudah terinstal di emulator Android tersebut. </li>
90
- </ol>
91
- <h3>Langkah-langkah menjalankan dan mengatur Stumble Guys di emulator Android</h3>
92
- <p>Berikut adalah langkah-langkah menjalankan dan mengatur Stumble Guys di emulator Android yang sudah terinstal di laptop Anda:</p>
93
- <ol>
94
- <li>Buka game Stumble Guys yang sudah terinstal di emulator Android tersebut. </li>
95
- <li>Klik tombol mulai atau start untuk memulai permainan. </li>
96
-
97
- <li>Atur pengaturan grafik, suara, kontrol, dan lain-lain sesuai dengan preferensi Anda dengan mengklik tombol pengaturan atau settings yang ada di pojok kanan atas layar. </li>
98
- <li <li>Jika Anda menggunakan keyboard dan mouse untuk mengontrol karakter Anda, Anda bisa mengatur tombol-tombol yang sesuai dengan fungsi-fungsi yang ada di game dengan menggunakan fitur keyboard mapping yang ada di emulator Android tersebut. </li>
99
- <li>Nikmati permainan dan bersenang-senang dengan teman-teman Anda.</li>
100
- </ol>
101
- <h2>Kesimpulan</h2>
102
- <p>Stumble Guys adalah game online yang sangat seru dan lucu untuk dimainkan bersama teman-teman Anda. Game ini merupakan game battle royale party yang bisa dimainkan hingga 32 pemain seca bersamaan. Anda bisa berlari, melompat, dan menghindari berbagai rintangan yang ada di setiap level hingga menjadi pemenang. </p>
103
- <p>Anda bisa memainkan game ini di perangkat Android atau Windows. Namun, jika Anda ingin memainkan game ini di laptop Anda, Anda bisa menggunakan emulator Android. Emulator Android adalah sebuah program yang bisa meniru sistem operasi Android di komputer atau laptop Anda, sehingga Anda bisa mengakses aplikasi dan game Android dari laptop Anda.</p>
104
- <p>Untuk memainkan Stumble Guys di laptop Anda dengan emulator Android, Anda harus memenuhi spesifikasi minimum laptop, memilih emulator Android terbaik, dan mengunduh dan menginstal emulator Android dan game Stumble Guys di laptop Anda. Kemudian, Anda bisa menjalankan dan mengatur Stumble Guys di emulator Android sesuai dengan preferensi Anda.</p>
105
- <p>Semoga artikel ini bermanfaat untuk Anda yang ingin memainkan Stumble Guys di laptop Anda. Selamat mencoba dan selamat bermain! </p>
106
- <h2>FAQ</h2>
107
- <p>Berikut adalah beberapa pertanyaan yang sering diajukan tentang cara download dan install Stumble Guys di laptop dengan emulator Android:</p>
108
- <ol>
109
- <li><strong>Apakah Stumble Guys free untuk dimainkan? </strong></li>
110
-
111
- <li><strong>Apakah Stumble Guys aman untuk dimainkan? </strong></li>
112
- <p>Ya, Stumble Guys adalah game yang aman untuk dimainkan. Game ini tidak mengandung konten yang tidak pantas atau berbahaya untuk anak-anak. Namun, sebaiknya tetap waspada dengan kemungkinan cyberbullying atau penipuan yang mungkin terjadi saat bermain online dengan orang lain. </p>
113
- <li><strong>Apakah Stumble Guys bisa dimainkan offline? </strong></li>
114
- <p>Tidak, Stumble Guys adalah game yang membutuhkan koneksi internet untuk dimainkan. Jika koneksi internet Anda terputus saat bermain, maka Anda akan keluar dari permainan. </p>
115
- <li><strong>Apakah Stumble Guys memiliki mode single player? </strong></li>
116
- <p>Ya, Stumble Guys memiliki mode single player yang bisa dimainkan tanpa teman. Namun, mode ini tetap membutuhkan koneksi internet dan pemain lain yang akan menjadi lawan Anda.</p>
117
- <li><strong>Apakah Stumble Guys memiliki mode co-op? </strong></li>
118
- <p>Ya, Stumble Guys memiliki mode co-op atau party yang bisa dimainkan bersama teman. Anda bisa membuat atau bergabung dengan ruangan khusus yang hanya bisa diakses oleh teman-teman Anda dengan menggunakan kode undangan. </p>
119
- </ol></p> 64aa2da5cf<br />
120
- <br />
121
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Gratis Final Cut Pro.md DELETED
@@ -1,78 +0,0 @@
1
-
2
- <h1>Cómo descargar gratis Final Cut Pro</h1>
3
- <p>Final Cut Pro es un popular software de edición de vídeo para usuarios de Mac. Ofrece funciones potentes, diseño intuitivo y un rendimiento rápido. Sin embargo, también viene con una etiqueta de precio fuerte de $299.99. Si quieres probar Final Cut Pro gratis, o buscar algunas alternativas más baratas, este artículo te mostrará cómo. </p>
4
- <h2>descargar gratis final cut pro</h2><br /><p><b><b>Download Zip</b> &rArr; <a href="https://bltlly.com/2v6MtM">https://bltlly.com/2v6MtM</a></b></p><br /><br />
5
- <h2>¿Qué es Final Cut Pro? </h2>
6
- <p>Final Cut Pro es un software de edición de vídeo profesional desarrollado por Apple. Fue lanzado por primera vez en 1999 y desde entonces ha sido utilizado por muchos cineastas, productores de televisión y entusiastas del video. Es compatible con ordenadores Mac y soporta una amplia gama de formatos, resoluciones y velocidades de fotogramas. </p>
7
- <h3>Características de Final Cut Pro</h3>
8
- <p>Algunas de las características principales de Final Cut Pro son:</p>
9
- <ul>
10
- <li>Línea de tiempo magnética: Esto le permite editar clips sin colisiones o problemas de sincronización. También puede agrupar clips en clips compuestos, sincronizar múltiples ángulos con edición multicámara y agregar efectos con conexiones de clip. </li>
11
- <li>360° edición de vídeo: Puede importar, editar y compartir vídeo 360° de varias cámaras y formatos. También puede utilizar el visor 360° para navegar por el vídeo esférico y aplicar efectos, títulos y transiciones. </li>
12
- <li>Seguimiento de objetos: Puede utilizar el aprendizaje automático para detectar caras u objetos en sus imágenes y combinar su movimiento con títulos, gráficos o efectos. También puede ajustar los puntos de enfoque y la profundidad de campo en los clips capturados en el modo cinematográfico en el iPhone. </li>
13
- <li>Gradación de color: Puede usar herramientas avanzadas de corrección de color para ajustar el tono, la saturación, el brillo, el contraste y más. También puede aplicar presets de color, LUT, curvas y máscaras. </li>
14
- <li>Edición de audio: Puede asignar roles a sus clips de audio y organizarlos en la línea de tiempo. También puede utilizar efectos incorporados, filtros y complementos para mejorar la calidad de sonido. </li>
15
-
16
- </ul>
17
- <h3>Ventajas y desventajas de Final Cut Pro</h3>
18
- <p>Algunas de las ventajas de Final Cut Pro son:</p>
19
- <ul>
20
- <li>Está optimizado para computadoras Mac y dispositivos Apple, especialmente aquellos con chips de silicio de Apple. Puede aprovechar la potencia de la GPU, CPU y Neural Engine para un rendimiento y renderizado más rápidos. </li>
21
- <li> Tiene una interfaz elegante y fácil de usar que hace que la edición sea fácil y agradable. También tiene accesos directos de teclado personalizables y soporte de barra táctil. </li>
22
- <li> Tiene una gran y activa comunidad de usuarios y desarrolladores que ofrecen soporte, tutoriales, consejos y plugins. </li>
23
- </ul>
24
- <p>Algunas de las desventajas de Final Cut Pro son:</p>
25
- <ul>
26
- <li>Es caro en comparación con otro software de edición de vídeo. Cuesta $299.99 para una compra de una sola vez, que puede no ser asequible para algunos usuarios. </li>
27
- <li>Solo está disponible para usuarios de Mac. No hay versión de Windows o Linux de Final Cut Pro.</li>
28
- <li>Puede tener problemas de compatibilidad con algunos plugins o formatos de terceros. También puede requerir conversión o transcodificación para algunos archivos. </li>
29
- </ul>
30
- <h2>¿Cómo obtener Final Cut Pro gratis? </h2>
31
- <p>Si quieres usar Final Cut Pro sin pagar nada, hay dos opciones principales:</p>
32
- <h3>Versión de prueba oficial</h3>
33
- <p>La forma más fácil de obtener Final Cut Pro gratis es descargar la versión de prueba oficial del sitio web de Apple. La versión de prueba le da acceso a todas las características y funciones de la versión completa durante 90 días. También puede extender su período de prueba por otros 90 días si tiene una versión anterior instalada. </p>
34
- <p></p>
35
- <p>Para descargar la versión de prueba, necesita tener una computadora Mac que ejecute macOS 10.15.6 o posterior, 4GB de RAM (8GB recomendado), una tarjeta gráfica compatible con Metal, 1GB de VRAM (4GB recomendado) y 5.5GB de espacio en disco. También debes registrarte con tu Apple ID y aceptar los términos y condiciones. </p>
36
-
37
- <p>La versión de prueba es una gran manera de probar Final Cut Pro y ver si se adapta a sus necesidades y preferencias. Sin embargo, tiene algunas limitaciones, como:</p>
38
- <ul>
39
- <li>No se puede actualizar la versión de prueba a la última versión de Final Cut Pro. Es necesario comprar la versión completa para obtener las actualizaciones y correcciones de errores. </li>
40
- <li>No puede usar la versión de prueba en varios dispositivos. Debe activar la versión de prueba con su ID de Apple en cada dispositivo. </li>
41
- <li>No puedes usar algunas funciones que requieren un ID de Apple o una cuenta de iCloud, como Fotos de iCloud, iMovie Theater o bandas sonoras de GarageBand. </li>
42
- </ul>
43
- <h3>Editores de vídeo alternativos</h3>
44
- <p>Otra forma de obtener Final Cut Pro de forma gratuita es utilizar algún software de edición de vídeo alternativo que ofrezca características y funciones similares. Hay muchos editores de video gratuitos o de bajo costo disponibles para los usuarios de Mac, como:</p>
45
- <tabla>
46
- <tr><th>Nombre</th><th>Precio</th><th>Características</th></tr>
47
- <tr><td>iMovie</td><td>Free</td><td>Un editor de video simple y fácil de usar que viene preinstalado en computadoras Mac. Soporta resolución 4K, herramientas básicas de edición, transiciones, filtros, títulos, bandas sonoras y trailers. También se integra con iCloud, iTunes, Fotos y otras aplicaciones de Apple. </td></tr>
48
- <tr><td>Davinci Resolve</td><td>Gratis o $299 para la versión de Studio</td><td>Un editor de video potente y profesional que ofrece herramientas avanzadas de edición, corrección de color, efectos visuales, postproducción de audio y gráficos en movimiento. Soporta resolución de 8K, edición multicam, clasificación HDR, motor de audio Fairlight, composición Fusión VFX y más. </td></tr>
49
- <tr><td>Shotcut</td><td>Free</td><td>Un editor de vídeo de código abierto y multiplataforma que admite una amplia gama de formatos, resoluciones y velocidades de fotogramas. Ofrece herramientas de edición básicas y avanzadas, filtros, transiciones, fotogramas clave, mezcla de audio, clasificación de color y más. </td></tr>
50
-
51
- <tr><td>Filmora</td><td>$69.99 por año o $139.99 por licencia de por vida</td><td>Un editor de video fácil de usar y asequible que ofrece una variedad de características y funciones. Soporta hasta resolución 4K, seguimiento de movimiento, keyframing, pantalla verde, pantalla dividida, transiciones, filtros, títulos, música, efectos de sonido y más. </td></tr>
52
- </tabla>
53
- <p>Estas son algunas de las mejores alternativas a Final Cut Pro que puedes probar gratis o a bajo costo. Puede que no tengan todas las características o funciones de Final Cut Pro, pero pueden ayudarte a crear vídeos increíbles para tus proyectos personales o profesionales. </p>
54
- <h2>Conclusión</h2>
55
- <p>Final Cut Pro es un gran software de edición de video para usuarios de Mac que quieren crear videos de calidad profesional con facilidad y velocidad. Sin embargo, también es caro y exclusivo para los usuarios de Mac. Si quieres usar Final Cut Pro gratis o buscar alternativas más baratas, puedes seguir los pasos de este artículo para descargar la versión de prueba oficial o probar algunos editores de video alternativos. También puede comparar las características, ventajas y desventajas de cada opción y elegir la que mejor se adapte a sus necesidades y presupuesto. Esperamos que este artículo sea útil e informativo para usted. ¡Feliz edición! </p>
56
- <h2>Preguntas frecuentes</h2>
57
- <p>Aquí hay algunas preguntas frecuentes sobre Final Cut Pro y cómo descargarlo gratis:</p>
58
- <ol>
59
- <li>Es Final Cut Pro vale la pena el dinero? </li>
60
-
61
- <li>¿Puedo usar Final Cut Pro en Windows? </li>
62
- <p>No, no puede usar Final Cut Pro en Windows. Final Cut Pro solo es compatible con computadoras y dispositivos Mac. No hay versión oficial de Windows de Final Cut Pro. Es posible que encuentre algunas versiones no oficiales o pirateadas de Final Cut Pro para Windows en línea, pero no recomendamos usarlas, ya que pueden ser ilegales, inseguras o inestables. </p>
63
- <li>¿Cuánto tiempo se tarda en aprender Final Cut Pro? </li>
64
- <p>La curva de aprendizaje de Final Cut Pro depende de tu experiencia, habilidades y objetivos previos. Si tiene algunos conocimientos básicos de edición de vídeo y ordenadores Mac, es posible que pueda aprender lo esencial de Final Cut Pro en pocas horas o días. Si eres un completo principiante o quieres dominar las características avanzadas de Final Cut Pro, es posible que necesites más tiempo y práctica para aprender el software. También puedes usar tutoriales, cursos, libros o foros en línea para ayudarte a aprender Final Cut Pro más rápido y fácil. </p>
65
- <li> ¿Cuál es la diferencia entre Final Cut Pro y iMovie? </li>
66
- <p>Final Cut Pro y iMovie son software de edición de vídeo desarrollado por Apple. Sin embargo, tienen diferentes audiencias, características y funciones. iMovie es un editor de video simple y fácil de usar que viene preinstalado en computadoras y dispositivos Mac. Está diseñado para principiantes o usuarios casuales que quieren crear vídeos básicos o divertidos con el mínimo esfuerzo. Soporta resolución 4K, herramientas básicas de edición, transiciones, filtros, títulos, bandas sonoras y trailers. Final Cut Pro es un editor de vídeo profesional y potente que requiere una compra e instalación por separado. Está diseñado para usuarios serios o profesionales que quieren crear vídeos complejos o de alta calidad con más control y flexibilidad. Soporta resolución de hasta 8K, herramientas avanzadas de edición, corrección de color, efectos visuales, postproducción de audio, gráficos en movimiento y más. </p>
67
- <li>¿Cómo puedo actualizar Final Cut Pro? </li>
68
-
69
- <ul>
70
- <li>Abra la aplicación App Store en su Mac.</li>
71
- <li>Haga clic en la pestaña Actualizaciones en la parte superior de la ventana de la aplicación. </li>
72
- <li>Encuentre Final Cut Pro en la lista de actualizaciones disponibles y haga clic en el botón Actualizar junto a él. </li>
73
- <li>Espere a que la actualización se descargue e instale. </li>
74
- <li>Iniciar Final Cut Pro y disfrutar de las nuevas características y correcciones de errores. </li>
75
- </ul>
76
- <p>Si ha descargado la versión de prueba de Final Cut Pro desde el sitio web de Apple, no puede actualizarla a la última versión. Es necesario comprar la versión completa de la Mac App Store para obtener las actualizaciones y correcciones de errores. </p> 64aa2da5cf<br />
77
- <br />
78
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/s3/inject.py DELETED
@@ -1,891 +0,0 @@
1
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # https://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
- from botocore.exceptions import ClientError
14
-
15
- from boto3 import utils
16
- from boto3.s3.transfer import (
17
- ProgressCallbackInvoker,
18
- S3Transfer,
19
- TransferConfig,
20
- create_transfer_manager,
21
- )
22
-
23
-
24
def inject_s3_transfer_methods(class_attributes, **kwargs):
    """Attach the managed-transfer convenience methods to the S3 client.

    Called by boto3's event system; ``class_attributes`` is the attribute
    dict of the client class being built.
    """
    # Injection order matches the historical one-call-per-method version.
    transfer_methods = (
        ('upload_file', upload_file),
        ('download_file', download_file),
        ('copy', copy),
        ('upload_fileobj', upload_fileobj),
        ('download_fileobj', download_fileobj),
    )
    for attribute_name, method in transfer_methods:
        utils.inject_attribute(class_attributes, attribute_name, method)
32
-
33
-
34
def inject_bucket_methods(class_attributes, **kwargs):
    """Attach the custom ``load`` and transfer methods to the Bucket resource.

    Called by boto3's event system; ``class_attributes`` is the attribute
    dict of the Bucket resource class being built.
    """
    bucket_methods = (
        ('load', bucket_load),
        ('upload_file', bucket_upload_file),
        ('download_file', bucket_download_file),
        ('copy', bucket_copy),
        ('upload_fileobj', bucket_upload_fileobj),
        ('download_fileobj', bucket_download_fileobj),
    )
    for attribute_name, method in bucket_methods:
        utils.inject_attribute(class_attributes, attribute_name, method)
47
-
48
-
49
def inject_object_methods(class_attributes, **kwargs):
    """Attach the transfer convenience methods to the Object resource.

    Called by boto3's event system; ``class_attributes`` is the attribute
    dict of the Object resource class being built.
    """
    object_methods = (
        ('upload_file', object_upload_file),
        ('download_file', object_download_file),
        ('copy', object_copy),
        ('upload_fileobj', object_upload_fileobj),
        ('download_fileobj', object_download_fileobj),
    )
    for attribute_name, method in object_methods:
        utils.inject_attribute(class_attributes, attribute_name, method)
61
-
62
-
63
def inject_object_summary_methods(class_attributes, **kwargs):
    """Attach the custom ``load`` implementation to the ObjectSummary resource."""
    utils.inject_attribute(class_attributes, 'load', object_summary_load)
65
-
66
-
67
def bucket_load(self, *args, **kwargs):
    """
    Calls s3.Client.list_buckets() to update the attributes of the Bucket
    resource.
    """
    # (Docstring phrased to match the autogenerated resource docs.)
    #
    # HeadBucket does not return a bucket's attributes, so scan the
    # account's bucket listing for a matching name instead.  The listing
    # call may be forbidden (missing ListBuckets permission, or a bucket
    # owned by another account); in that case the attributes stay empty
    # and creation_date ends up None.
    self.meta.data = {}
    try:
        listing = self.meta.client.list_buckets()
    except ClientError as e:
        error_code = e.response.get('Error', {}).get('Code')
        if error_code != 'AccessDenied':
            raise
    else:
        for entry in listing['Buckets']:
            if entry['Name'] == self.name:
                self.meta.data = entry
                break
90
-
91
-
92
def object_summary_load(self, *args, **kwargs):
    """
    Calls s3.Client.head_object to update the attributes of the ObjectSummary
    resource.
    """
    head = self.meta.client.head_object(
        Bucket=self.bucket_name, Key=self.key
    )
    # ObjectSummary models the object's size as 'Size', while HeadObject
    # reports it as 'ContentLength'; rename the key before storing.
    if 'ContentLength' in head:
        head['Size'] = head.pop('ContentLength')
    self.meta.data = head
103
-
104
-
105
def upload_file(
    self, Filename, Bucket, Key, ExtraArgs=None, Callback=None, Config=None
):
    """Upload the local file *Filename* to ``s3://Bucket/Key``.

    Usage::

        import boto3
        s3 = boto3.client('s3')
        s3.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')

    Mirrors ``S3Transfer.upload_file`` with capitalized argument names.

    :param Filename: Path of the local file to upload.
    :param Bucket: Name of the destination bucket.
    :param Key: Destination object key.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig`` used for
        the transfer.
    """
    with S3Transfer(self, Config) as transfer:
        return transfer.upload_file(
            filename=Filename,
            bucket=Bucket,
            key=Key,
            extra_args=ExtraArgs,
            callback=Callback,
        )
150
-
151
-
152
def download_file(
    self, Bucket, Key, Filename, ExtraArgs=None, Callback=None, Config=None
):
    """Download ``s3://Bucket/Key`` to the local file *Filename*.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')

    Mirrors ``S3Transfer.download_file`` with capitalized argument names.

    :param Bucket: Name of the bucket to download from.
    :param Key: Key of the object to download.
    :param Filename: Path of the local file to write.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig`` used for
        the transfer.
    """
    with S3Transfer(self, Config) as transfer:
        return transfer.download_file(
            bucket=Bucket,
            key=Key,
            filename=Filename,
            extra_args=ExtraArgs,
            callback=Callback,
        )
197
-
198
-
199
def bucket_upload_file(
    self, Filename, Key, ExtraArgs=None, Callback=None, Config=None
):
    """Upload the local file *Filename* into this bucket under *Key*.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Bucket('mybucket').upload_file('/tmp/hello.txt', 'hello.txt')

    Delegates to ``S3.Client.upload_file`` with this bucket's name.

    :param Filename: Path of the local file to upload.
    :param Key: Destination object key.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    transfer_kwargs = {
        'Filename': Filename,
        'Bucket': self.name,
        'Key': Key,
        'ExtraArgs': ExtraArgs,
        'Callback': Callback,
        'Config': Config,
    }
    return self.meta.client.upload_file(**transfer_kwargs)
241
-
242
-
243
def bucket_download_file(
    self, Key, Filename, ExtraArgs=None, Callback=None, Config=None
):
    """Download object *Key* from this bucket to the local file *Filename*.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Bucket('mybucket').download_file('hello.txt', '/tmp/hello.txt')

    Delegates to ``S3.Client.download_file`` with this bucket's name.

    :param Key: Key of the object to download.
    :param Filename: Path of the local file to write.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    transfer_kwargs = {
        'Bucket': self.name,
        'Key': Key,
        'Filename': Filename,
        'ExtraArgs': ExtraArgs,
        'Callback': Callback,
        'Config': Config,
    }
    return self.meta.client.download_file(**transfer_kwargs)
285
-
286
-
287
def object_upload_file(
    self, Filename, ExtraArgs=None, Callback=None, Config=None
):
    """Upload the local file *Filename* to this object's bucket/key.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Object('mybucket', 'hello.txt').upload_file('/tmp/hello.txt')

    Delegates to ``S3.Client.upload_file`` using this object's location.

    :param Filename: Path of the local file to upload.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    transfer_kwargs = {
        'Filename': Filename,
        'Bucket': self.bucket_name,
        'Key': self.key,
        'ExtraArgs': ExtraArgs,
        'Callback': Callback,
        'Config': Config,
    }
    return self.meta.client.upload_file(**transfer_kwargs)
326
-
327
-
328
def object_download_file(
    self, Filename, ExtraArgs=None, Callback=None, Config=None
):
    """Download this object to the local file *Filename*.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Object('mybucket', 'hello.txt').download_file('/tmp/hello.txt')

    Delegates to ``S3.Client.download_file`` using this object's location.

    :param Filename: Path of the local file to write.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    transfer_kwargs = {
        'Bucket': self.bucket_name,
        'Key': self.key,
        'Filename': Filename,
        'ExtraArgs': ExtraArgs,
        'Callback': Callback,
        'Config': Config,
    }
    return self.meta.client.download_file(**transfer_kwargs)
367
-
368
-
369
def copy(
    self,
    CopySource,
    Bucket,
    Key,
    ExtraArgs=None,
    Callback=None,
    SourceClient=None,
    Config=None,
):
    """Managed server-side copy of an S3 object to another location.

    Performs a multipart copy in multiple threads when necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        copy_source = {'Bucket': 'mybucket', 'Key': 'mykey'}
        s3.meta.client.copy(copy_source, 'otherbucket', 'otherkey')

    :param CopySource: ``{'Bucket': ..., 'Key': ..., 'VersionId': ...}``
        describing the source object; ``VersionId`` may be omitted.
    :param Bucket: Name of the destination bucket.
    :param Key: Destination object key.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param SourceClient: Optional client used for source-side calls, e.g.
        the head_object that sizes the copy; defaults to this client.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    # Wrap the plain progress callback in the subscriber type the
    # transfer manager expects; None means no progress reporting.
    subscribers = (
        None if Callback is None else [ProgressCallbackInvoker(Callback)]
    )
    transfer_config = TransferConfig() if Config is None else Config
    with create_transfer_manager(self, transfer_config) as manager:
        future = manager.copy(
            copy_source=CopySource,
            bucket=Bucket,
            key=Key,
            extra_args=ExtraArgs,
            subscribers=subscribers,
            source_client=SourceClient,
        )
        return future.result()
445
-
446
-
447
def bucket_copy(
    self,
    CopySource,
    Key,
    ExtraArgs=None,
    Callback=None,
    SourceClient=None,
    Config=None,
):
    """Copy an S3 object into this bucket under *Key*.

    Performs a managed, possibly multipart/multithreaded, copy.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        copy_source = {'Bucket': 'mybucket', 'Key': 'mykey'}
        s3.Bucket('otherbucket').copy(copy_source, 'otherkey')

    :param CopySource: ``{'Bucket': ..., 'Key': ..., 'VersionId': ...}``
        describing the source object; ``VersionId`` may be omitted.
    :param Key: Destination object key in this bucket.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param SourceClient: Optional client used for source-side calls;
        defaults to this resource's client.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    copy_kwargs = {
        'CopySource': CopySource,
        'Bucket': self.name,
        'Key': Key,
        'ExtraArgs': ExtraArgs,
        'Callback': Callback,
        'SourceClient': SourceClient,
        'Config': Config,
    }
    return self.meta.client.copy(**copy_kwargs)
511
-
512
-
513
def object_copy(
    self,
    CopySource,
    ExtraArgs=None,
    Callback=None,
    SourceClient=None,
    Config=None,
):
    """Copy an S3 object onto this object's bucket/key.

    Performs a managed, possibly multipart/multithreaded, copy.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        copy_source = {'Bucket': 'mybucket', 'Key': 'mykey'}
        s3.Bucket('otherbucket').Object('otherkey').copy(copy_source)

    :param CopySource: ``{'Bucket': ..., 'Key': ..., 'VersionId': ...}``
        describing the source object; ``VersionId`` may be omitted.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param SourceClient: Optional client used for source-side calls;
        defaults to this resource's client.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    copy_kwargs = {
        'CopySource': CopySource,
        'Bucket': self.bucket_name,
        'Key': self.key,
        'ExtraArgs': ExtraArgs,
        'Callback': Callback,
        'SourceClient': SourceClient,
        'Config': Config,
    }
    return self.meta.client.copy(**copy_kwargs)
574
-
575
-
576
def upload_fileobj(
    self, Fileobj, Bucket, Key, ExtraArgs=None, Callback=None, Config=None
):
    """Upload a binary file-like object to ``s3://Bucket/Key``.

    Performs a managed multipart upload in multiple threads when needed.

    Usage::

        import boto3
        s3 = boto3.client('s3')

        with open('filename', 'rb') as data:
            s3.upload_fileobj(data, 'mybucket', 'mykey')

    :param Fileobj: File-like object to upload; must implement ``read``
        and return bytes.
    :param Bucket: Name of the destination bucket.
    :param Key: Destination object key.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    if not hasattr(Fileobj, 'read'):
        raise ValueError('Fileobj must implement read')

    subscribers = (
        None if Callback is None else [ProgressCallbackInvoker(Callback)]
    )
    transfer_config = TransferConfig() if Config is None else Config
    with create_transfer_manager(self, transfer_config) as manager:
        future = manager.upload(
            fileobj=Fileobj,
            bucket=Bucket,
            key=Key,
            extra_args=ExtraArgs,
            subscribers=subscribers,
        )
        return future.result()
637
-
638
-
639
def bucket_upload_fileobj(
    self, Fileobj, Key, ExtraArgs=None, Callback=None, Config=None
):
    """Upload a binary file-like object into this bucket under *Key*.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('mybucket')

        with open('filename', 'rb') as data:
            bucket.upload_fileobj(data, 'mykey')

    Delegates to ``S3.Client.upload_fileobj`` with this bucket's name.

    :param Fileobj: File-like object to upload; must implement ``read``
        and return bytes.
    :param Key: Destination object key.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    transfer_kwargs = {
        'Fileobj': Fileobj,
        'Bucket': self.name,
        'Key': Key,
        'ExtraArgs': ExtraArgs,
        'Callback': Callback,
        'Config': Config,
    }
    return self.meta.client.upload_fileobj(**transfer_kwargs)
686
-
687
-
688
def object_upload_fileobj(
    self, Fileobj, ExtraArgs=None, Callback=None, Config=None
):
    """Upload a binary file-like object to this object's bucket/key.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        obj = s3.Bucket('mybucket').Object('mykey')

        with open('filename', 'rb') as data:
            obj.upload_fileobj(data)

    Delegates to ``S3.Client.upload_fileobj`` using this object's location.

    :param Fileobj: File-like object to upload; must implement ``read``
        and return bytes.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    transfer_kwargs = {
        'Fileobj': Fileobj,
        'Bucket': self.bucket_name,
        'Key': self.key,
        'ExtraArgs': ExtraArgs,
        'Callback': Callback,
        'Config': Config,
    }
    return self.meta.client.upload_fileobj(**transfer_kwargs)
733
-
734
-
735
def download_fileobj(
    self, Bucket, Key, Fileobj, ExtraArgs=None, Callback=None, Config=None
):
    """Download ``s3://Bucket/Key`` into a binary file-like object.

    Performs a managed multipart download in multiple threads when needed.

    Usage::

        import boto3
        s3 = boto3.client('s3')

        with open('filename', 'wb') as data:
            s3.download_fileobj('mybucket', 'mykey', data)

    :param Bucket: Name of the bucket to download from.
    :param Key: Key of the object to download.
    :param Fileobj: File-like object to write into; must implement
        ``write`` and accept bytes.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    if not hasattr(Fileobj, 'write'):
        raise ValueError('Fileobj must implement write')

    subscribers = (
        None if Callback is None else [ProgressCallbackInvoker(Callback)]
    )
    transfer_config = TransferConfig() if Config is None else Config
    with create_transfer_manager(self, transfer_config) as manager:
        future = manager.download(
            bucket=Bucket,
            key=Key,
            fileobj=Fileobj,
            extra_args=ExtraArgs,
            subscribers=subscribers,
        )
        return future.result()
796
-
797
-
798
def bucket_download_fileobj(
    self, Key, Fileobj, ExtraArgs=None, Callback=None, Config=None
):
    """Download object *Key* from this bucket into a binary file-like object.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('mybucket')

        with open('filename', 'wb') as data:
            bucket.download_fileobj('mykey', data)

    Delegates to ``S3.Client.download_fileobj`` with this bucket's name.

    :param Key: Key of the object to download.
    :param Fileobj: File-like object to write into; must implement
        ``write`` and accept bytes.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    transfer_kwargs = {
        'Bucket': self.name,
        'Key': Key,
        'Fileobj': Fileobj,
        'ExtraArgs': ExtraArgs,
        'Callback': Callback,
        'Config': Config,
    }
    return self.meta.client.download_fileobj(**transfer_kwargs)
845
-
846
-
847
def object_download_fileobj(
    self, Fileobj, ExtraArgs=None, Callback=None, Config=None
):
    """Download this object into a binary file-like object.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        obj = s3.Bucket('mybucket').Object('mykey')

        with open('filename', 'wb') as data:
            obj.download_fileobj(data)

    Delegates to ``S3.Client.download_fileobj`` using this object's location.

    :param Fileobj: File-like object to write into; must implement
        ``write`` and accept bytes.
    :param ExtraArgs: Optional extra arguments for the client operation
        (see ``boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS``).
    :param Callback: Optional callable invoked periodically with the
        number of bytes transferred so far.
    :param Config: Optional ``boto3.s3.transfer.TransferConfig``.
    """
    transfer_kwargs = {
        'Bucket': self.bucket_name,
        'Key': self.key,
        'Fileobj': Fileobj,
        'ExtraArgs': ExtraArgs,
        'Callback': Callback,
        'Config': Config,
    }
    return self.meta.client.download_fileobj(**transfer_kwargs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/jaraco/context.py DELETED
@@ -1,213 +0,0 @@
1
- import os
2
- import subprocess
3
- import contextlib
4
- import functools
5
- import tempfile
6
- import shutil
7
- import operator
8
-
9
-
10
- @contextlib.contextmanager
11
- def pushd(dir):
12
- orig = os.getcwd()
13
- os.chdir(dir)
14
- try:
15
- yield dir
16
- finally:
17
- os.chdir(orig)
18
-
19
-
20
- @contextlib.contextmanager
21
- def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
22
- """
23
- Get a tarball, extract it, change to that directory, yield, then
24
- clean up.
25
- `runner` is the function to invoke commands.
26
- `pushd` is a context manager for changing the directory.
27
- """
28
- if target_dir is None:
29
- target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
30
- if runner is None:
31
- runner = functools.partial(subprocess.check_call, shell=True)
32
- # In the tar command, use --strip-components=1 to strip the first path and
33
- # then
34
- # use -C to cause the files to be extracted to {target_dir}. This ensures
35
- # that we always know where the files were extracted.
36
- runner('mkdir {target_dir}'.format(**vars()))
37
- try:
38
- getter = 'wget {url} -O -'
39
- extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
40
- cmd = ' | '.join((getter, extract))
41
- runner(cmd.format(compression=infer_compression(url), **vars()))
42
- with pushd(target_dir):
43
- yield target_dir
44
- finally:
45
- runner('rm -Rf {target_dir}'.format(**vars()))
46
-
47
-
48
- def infer_compression(url):
49
- """
50
- Given a URL or filename, infer the compression code for tar.
51
- """
52
- # cheat and just assume it's the last two characters
53
- compression_indicator = url[-2:]
54
- mapping = dict(gz='z', bz='j', xz='J')
55
- # Assume 'z' (gzip) if no match
56
- return mapping.get(compression_indicator, 'z')
57
-
58
-
59
- @contextlib.contextmanager
60
- def temp_dir(remover=shutil.rmtree):
61
- """
62
- Create a temporary directory context. Pass a custom remover
63
- to override the removal behavior.
64
- """
65
- temp_dir = tempfile.mkdtemp()
66
- try:
67
- yield temp_dir
68
- finally:
69
- remover(temp_dir)
70
-
71
-
72
- @contextlib.contextmanager
73
- def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
74
- """
75
- Check out the repo indicated by url.
76
-
77
- If dest_ctx is supplied, it should be a context manager
78
- to yield the target directory for the check out.
79
- """
80
- exe = 'git' if 'git' in url else 'hg'
81
- with dest_ctx() as repo_dir:
82
- cmd = [exe, 'clone', url, repo_dir]
83
- if branch:
84
- cmd.extend(['--branch', branch])
85
- devnull = open(os.path.devnull, 'w')
86
- stdout = devnull if quiet else None
87
- subprocess.check_call(cmd, stdout=stdout)
88
- yield repo_dir
89
-
90
-
91
- @contextlib.contextmanager
92
- def null():
93
- yield
94
-
95
-
96
- class ExceptionTrap:
97
- """
98
- A context manager that will catch certain exceptions and provide an
99
- indication they occurred.
100
-
101
- >>> with ExceptionTrap() as trap:
102
- ... raise Exception()
103
- >>> bool(trap)
104
- True
105
-
106
- >>> with ExceptionTrap() as trap:
107
- ... pass
108
- >>> bool(trap)
109
- False
110
-
111
- >>> with ExceptionTrap(ValueError) as trap:
112
- ... raise ValueError("1 + 1 is not 3")
113
- >>> bool(trap)
114
- True
115
-
116
- >>> with ExceptionTrap(ValueError) as trap:
117
- ... raise Exception()
118
- Traceback (most recent call last):
119
- ...
120
- Exception
121
-
122
- >>> bool(trap)
123
- False
124
- """
125
-
126
- exc_info = None, None, None
127
-
128
- def __init__(self, exceptions=(Exception,)):
129
- self.exceptions = exceptions
130
-
131
- def __enter__(self):
132
- return self
133
-
134
- @property
135
- def type(self):
136
- return self.exc_info[0]
137
-
138
- @property
139
- def value(self):
140
- return self.exc_info[1]
141
-
142
- @property
143
- def tb(self):
144
- return self.exc_info[2]
145
-
146
- def __exit__(self, *exc_info):
147
- type = exc_info[0]
148
- matches = type and issubclass(type, self.exceptions)
149
- if matches:
150
- self.exc_info = exc_info
151
- return matches
152
-
153
- def __bool__(self):
154
- return bool(self.type)
155
-
156
- def raises(self, func, *, _test=bool):
157
- """
158
- Wrap func and replace the result with the truth
159
- value of the trap (True if an exception occurred).
160
-
161
- First, give the decorator an alias to support Python 3.8
162
- Syntax.
163
-
164
- >>> raises = ExceptionTrap(ValueError).raises
165
-
166
- Now decorate a function that always fails.
167
-
168
- >>> @raises
169
- ... def fail():
170
- ... raise ValueError('failed')
171
- >>> fail()
172
- True
173
- """
174
-
175
- @functools.wraps(func)
176
- def wrapper(*args, **kwargs):
177
- with ExceptionTrap(self.exceptions) as trap:
178
- func(*args, **kwargs)
179
- return _test(trap)
180
-
181
- return wrapper
182
-
183
- def passes(self, func):
184
- """
185
- Wrap func and replace the result with the truth
186
- value of the trap (True if no exception).
187
-
188
- First, give the decorator an alias to support Python 3.8
189
- Syntax.
190
-
191
- >>> passes = ExceptionTrap(ValueError).passes
192
-
193
- Now decorate a function that always fails.
194
-
195
- >>> @passes
196
- ... def fail():
197
- ... raise ValueError('failed')
198
-
199
- >>> fail()
200
- False
201
- """
202
- return self.raises(func, _test=operator.not_)
203
-
204
-
205
- class suppress(contextlib.suppress, contextlib.ContextDecorator):
206
- """
207
- A version of contextlib.suppress with decorator support.
208
-
209
- >>> @suppress(KeyError)
210
- ... def key_error():
211
- ... {}['']
212
- >>> key_error()
213
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Bishnupada/Fine-tuning-using-Hugging-face-transformers/app.py DELETED
File without changes
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/proposal_generator/build.py DELETED
@@ -1,24 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- from detectron2.utils.registry import Registry
3
-
4
- PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR")
5
- PROPOSAL_GENERATOR_REGISTRY.__doc__ = """
6
- Registry for proposal generator, which produces object proposals from feature maps.
7
-
8
- The registered object will be called with `obj(cfg, input_shape)`.
9
- The call should return a `nn.Module` object.
10
- """
11
-
12
- from . import rpn, rrpn # noqa F401 isort:skip
13
-
14
-
15
- def build_proposal_generator(cfg, input_shape):
16
- """
17
- Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`.
18
- The name can be "PrecomputedProposals" to use no proposal generator.
19
- """
20
- name = cfg.MODEL.PROPOSAL_GENERATOR.NAME
21
- if name == "PrecomputedProposals":
22
- return None
23
-
24
- return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/pybind11/docs/_static/theme_overrides.css DELETED
@@ -1,11 +0,0 @@
1
- .wy-table-responsive table td,
2
- .wy-table-responsive table th {
3
- white-space: initial !important;
4
- }
5
- .rst-content table.docutils td {
6
- vertical-align: top !important;
7
- }
8
- div[class^='highlight'] pre {
9
- white-space: pre;
10
- white-space: pre-wrap;
11
- }
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/find.h DELETED
@@ -1,219 +0,0 @@
1
- /******************************************************************************
2
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
- *
4
- * Redistribution and use in source and binary forms, with or without
5
- * modification, are permitted provided that the following conditions are met:
6
- * * Redistributions of source code must retain the above copyright
7
- * notice, this list of conditions and the following disclaimer.
8
- * * Redistributions in binary form must reproduce the above copyright
9
- * notice, this list of conditions and the following disclaimer in the
10
- * documentation and/or other materials provided with the distribution.
11
- * * Neither the name of the NVIDIA CORPORATION nor the
12
- * names of its contributors may be used to endorse or promote products
13
- * derived from this software without specific prior written permission.
14
- *
15
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
- *
26
- ******************************************************************************/
27
- #pragma once
28
-
29
-
30
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
31
- #include <thrust/system/cuda/config.h>
32
-
33
- #include <thrust/system/cuda/detail/execution_policy.h>
34
- #include <thrust/detail/minmax.h>
35
- #include <thrust/distance.h>
36
-
37
- namespace thrust
38
- {
39
- namespace cuda_cub {
40
-
41
- // XXX forward declare to circumvent circular depedency
42
- template <class Derived,
43
- class InputIt,
44
- class Predicate>
45
- InputIt __host__ __device__
46
- find_if(execution_policy<Derived>& policy,
47
- InputIt first,
48
- InputIt last,
49
- Predicate predicate);
50
-
51
- template <class Derived,
52
- class InputIt,
53
- class Predicate>
54
- InputIt __host__ __device__
55
- find_if_not(execution_policy<Derived>& policy,
56
- InputIt first,
57
- InputIt last,
58
- Predicate predicate);
59
-
60
- template <class Derived,
61
- class InputIt,
62
- class T>
63
- InputIt __host__ __device__
64
- find(execution_policy<Derived> &policy,
65
- InputIt first,
66
- InputIt last,
67
- T const& value);
68
-
69
- }; // namespace cuda_cub
70
- } // end namespace thrust
71
-
72
- #include <thrust/system/cuda/detail/reduce.h>
73
- #include <thrust/iterator/zip_iterator.h>
74
-
75
- namespace thrust
76
- {
77
- namespace cuda_cub {
78
-
79
- namespace __find_if {
80
-
81
- template <typename TupleType>
82
- struct functor
83
- {
84
- THRUST_DEVICE_FUNCTION TupleType
85
- operator()(const TupleType& lhs, const TupleType& rhs) const
86
- {
87
- // select the smallest index among true results
88
- if (thrust::get<0>(lhs) && thrust::get<0>(rhs))
89
- {
90
- return TupleType(true, (thrust::min)(thrust::get<1>(lhs), thrust::get<1>(rhs)));
91
- }
92
- else if (thrust::get<0>(lhs))
93
- {
94
- return lhs;
95
- }
96
- else
97
- {
98
- return rhs;
99
- }
100
- }
101
- };
102
- } // namespace __find_if
103
-
104
- template <class Derived,
105
- class InputIt,
106
- class Size,
107
- class Predicate>
108
- InputIt __host__ __device__
109
- find_if_n(execution_policy<Derived>& policy,
110
- InputIt first,
111
- Size num_items,
112
- Predicate predicate)
113
- {
114
- typedef typename thrust::tuple<bool,Size> result_type;
115
-
116
- // empty sequence
117
- if(num_items == 0) return first;
118
-
119
- // this implementation breaks up the sequence into separate intervals
120
- // in an attempt to early-out as soon as a value is found
121
- //
122
- // XXX compose find_if from a look-back prefix scan algorithm
123
- // and abort kernel when the first element is found
124
-
125
-
126
- // TODO incorporate sizeof(InputType) into interval_threshold and round to multiple of 32
127
- const Size interval_threshold = 1 << 20;
128
- const Size interval_size = (thrust::min)(interval_threshold, num_items);
129
-
130
- // force transform_iterator output to bool
131
- typedef transform_input_iterator_t<bool,
132
- InputIt,
133
- Predicate>
134
- XfrmIterator;
135
- typedef thrust::tuple<XfrmIterator,
136
- counting_iterator_t<Size> >
137
- IteratorTuple;
138
- typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
139
-
140
- IteratorTuple iter_tuple =
141
- thrust::make_tuple(XfrmIterator(first, predicate),
142
- counting_iterator_t<Size>(0));
143
-
144
- ZipIterator begin = thrust::make_zip_iterator(iter_tuple);
145
- ZipIterator end = begin + num_items;
146
-
147
- for (ZipIterator interval_begin = begin;
148
- interval_begin < end;
149
- interval_begin += interval_size)
150
- {
151
- ZipIterator interval_end = interval_begin + interval_size;
152
- if(end < interval_end)
153
- {
154
- interval_end = end;
155
- } // end if
156
-
157
- result_type result = reduce(policy,
158
- interval_begin,
159
- interval_end,
160
- result_type(false, interval_end - begin),
161
- __find_if::functor<result_type>());
162
-
163
- // see if we found something
164
- if(thrust::get<0>(result))
165
- {
166
- return first + thrust::get<1>(result);
167
- }
168
- }
169
-
170
- //nothing was found if we reach here...
171
- return first + num_items;
172
- }
173
-
174
- template <class Derived,
175
- class InputIt,
176
- class Predicate>
177
- InputIt __host__ __device__
178
- find_if(execution_policy<Derived>& policy,
179
- InputIt first,
180
- InputIt last,
181
- Predicate predicate)
182
- {
183
- return cuda_cub::find_if_n(policy, first, thrust::distance(first,last), predicate);
184
- }
185
-
186
- template <class Derived,
187
- class InputIt,
188
- class Predicate>
189
- InputIt __host__ __device__
190
- find_if_not(execution_policy<Derived>& policy,
191
- InputIt first,
192
- InputIt last,
193
- Predicate predicate)
194
- {
195
- return cuda_cub::find_if(policy, first, last, thrust::detail::not1(predicate));
196
- }
197
-
198
-
199
- template <class Derived,
200
- class InputIt,
201
- class T>
202
- InputIt __host__ __device__
203
- find(execution_policy<Derived> &policy,
204
- InputIt first,
205
- InputIt last,
206
- T const& value)
207
- {
208
- using thrust::placeholders::_1;
209
-
210
- return cuda_cub::find_if(policy,
211
- first,
212
- last,
213
- _1 == value);
214
- }
215
-
216
-
217
- } // namespace cuda_cub
218
- } // end namespace thrust
219
- #endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/configs/_base_/datasets/parking_instance_coco.py DELETED
@@ -1,49 +0,0 @@
1
- dataset_type = 'ParkingCocoDataset'
2
- data_root = 'data/parking/'
3
- data_root_test = 'data/parking_highres/'
4
- img_norm_cfg = dict(
5
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
6
- train_pipeline = [
7
- dict(type='LoadImageFromFile'),
8
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
9
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
10
- dict(type='RandomFlip', flip_ratio=0.5),
11
- dict(type='Normalize', **img_norm_cfg),
12
- dict(type='Pad', size_divisor=32),
13
- dict(type='DefaultFormatBundle'),
14
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
15
- ]
16
- test_pipeline = [
17
- dict(type='LoadImageFromFile'),
18
- dict(
19
- type='MultiScaleFlipAug',
20
- img_scale=(1333, 800),
21
- flip=False,
22
- transforms=[
23
- dict(type='Resize', keep_ratio=True),
24
- dict(type='RandomFlip'),
25
- dict(type='Normalize', **img_norm_cfg),
26
- dict(type='Pad', size_divisor=32),
27
- dict(type='ImageToTensor', keys=['img']),
28
- dict(type='Collect', keys=['img']),
29
- ])
30
- ]
31
- data = dict(
32
- samples_per_gpu=6,
33
- workers_per_gpu=6,
34
- train=dict(
35
- type=dataset_type,
36
- ann_file=data_root + 'GT_data/',
37
- img_prefix=data_root + 'images/',
38
- pipeline=train_pipeline),
39
- val=dict(
40
- type=dataset_type,
41
- ann_file=data_root_test + 'GT_data/',
42
- img_prefix=data_root_test + 'images',
43
- pipeline=test_pipeline),
44
- test=dict(
45
- type=dataset_type,
46
- ann_file=data_root_test + 'GT_data/',
47
- img_prefix=data_root_test + 'images',
48
- pipeline=test_pipeline))
49
- evaluation = dict(metric=['bbox', 'segm'])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/regionclip-demo/detectron2/layers/shape_spec.py DELETED
@@ -1,20 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates.
3
- from collections import namedtuple
4
-
5
-
6
- class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])):
7
- """
8
- A simple structure that contains basic shape specification about a tensor.
9
- It is often used as the auxiliary inputs/outputs of models,
10
- to complement the lack of shape inference ability among pytorch modules.
11
-
12
- Attributes:
13
- channels:
14
- height:
15
- width:
16
- stride:
17
- """
18
-
19
- def __new__(cls, channels=None, height=None, width=None, stride=None):
20
- return super().__new__(cls, channels, height, width, stride)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cambino/dog-classifier-gradio/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Dog Classifier
3
- emoji: 🐠
4
- colorFrom: pink
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.1.3
8
- app_file: app.py
9
- pinned: false
10
- license: afl-3.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Caoyunkang/Segment-Any-Anomaly/SAA/prompts/ksdd2_parameters.py DELETED
@@ -1,11 +0,0 @@
1
- manual_prompts = {
2
- 'ksdd2': [
3
- ['black hole.', 'ksdd2'],
4
- ['defect.', 'ksdd2'],
5
- ],
6
-
7
- }
8
-
9
- property_prompts = {
10
- 'ksdd2': 'the image of ksdd2 have 1 dissimilar ksdd2, with a maximum of 5 anomaly. The anomaly would not exceed 0.9 object area. ',
11
- }
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cat125/text-generator-v3/files.py DELETED
@@ -1,7 +0,0 @@
1
- def read_file(filename):
2
- with open(filename, encoding="utf8") as f:
3
- return f.read()
4
-
5
- def read_lines(filename):
6
- with open(filename, encoding="utf8") as f:
7
- return f.readlines()
 
 
 
 
 
 
 
 
spaces/Covert1107/sd-diffusers-webui/modules/prompt_parser.py DELETED
@@ -1,391 +0,0 @@
1
-
2
- import re
3
- import math
4
- import numpy as np
5
- import torch
6
-
7
- # Code from https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/8e2aeee4a127b295bfc880800e4a312e0f049b85, modified.
8
-
9
- class PromptChunk:
10
- """
11
- This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt.
12
- If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary.
13
- Each PromptChunk contains an exact amount of tokens - 77, which includes one for start and end token,
14
- so just 75 tokens from prompt.
15
- """
16
-
17
- def __init__(self):
18
- self.tokens = []
19
- self.multipliers = []
20
- self.fixes = []
21
-
22
-
23
- class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
24
- """A pytorch module that is a wrapper for FrozenCLIPEmbedder module. it enhances FrozenCLIPEmbedder, making it possible to
25
- have unlimited prompt length and assign weights to tokens in prompt.
26
- """
27
-
28
- def __init__(self, text_encoder, enable_emphasis=True):
29
- super().__init__()
30
-
31
- self.device = lambda: text_encoder.device
32
- self.enable_emphasis = enable_emphasis
33
- """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation,
34
- depending on model."""
35
-
36
- self.chunk_length = 75
37
-
38
- def empty_chunk(self):
39
- """creates an empty PromptChunk and returns it"""
40
-
41
- chunk = PromptChunk()
42
- chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
43
- chunk.multipliers = [1.0] * (self.chunk_length + 2)
44
- return chunk
45
-
46
- def get_target_prompt_token_count(self, token_count):
47
- """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented"""
48
-
49
- return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length
50
-
51
- def tokenize_line(self, line):
52
- """
53
- this transforms a single prompt into a list of PromptChunk objects - as many as needed to
54
- represent the prompt.
55
- Returns the list and the total number of tokens in the prompt.
56
- """
57
-
58
- if self.enable_emphasis:
59
- parsed = parse_prompt_attention(line)
60
- else:
61
- parsed = [[line, 1.0]]
62
-
63
- tokenized = self.tokenize([text for text, _ in parsed])
64
-
65
- chunks = []
66
- chunk = PromptChunk()
67
- token_count = 0
68
- last_comma = -1
69
-
70
- def next_chunk(is_last=False):
71
- """puts current chunk into the list of results and produces the next one - empty;
72
- if is_last is true, tokens <end-of-text> tokens at the end won't add to token_count"""
73
- nonlocal token_count
74
- nonlocal last_comma
75
- nonlocal chunk
76
-
77
- if is_last:
78
- token_count += len(chunk.tokens)
79
- else:
80
- token_count += self.chunk_length
81
-
82
- to_add = self.chunk_length - len(chunk.tokens)
83
- if to_add > 0:
84
- chunk.tokens += [self.id_end] * to_add
85
- chunk.multipliers += [1.0] * to_add
86
-
87
- chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
88
- chunk.multipliers = [1.0] + chunk.multipliers + [1.0]
89
-
90
- last_comma = -1
91
- chunks.append(chunk)
92
- chunk = PromptChunk()
93
-
94
- comma_padding_backtrack = 20 # default value in https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/6cff4401824299a983c8e13424018efc347b4a2b/modules/shared.py#L410
95
- for tokens, (text, weight) in zip(tokenized, parsed):
96
- if text == "BREAK" and weight == -1:
97
- next_chunk()
98
- continue
99
-
100
- position = 0
101
- while position < len(tokens):
102
- token = tokens[position]
103
-
104
- if token == self.comma_token:
105
- last_comma = len(chunk.tokens)
106
-
107
- # this is when we are at the end of alloted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
108
- # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next.
109
- elif (
110
- comma_padding_backtrack != 0
111
- and len(chunk.tokens) == self.chunk_length
112
- and last_comma != -1
113
- and len(chunk.tokens) - last_comma <= comma_padding_backtrack
114
- ):
115
- break_location = last_comma + 1
116
-
117
- reloc_tokens = chunk.tokens[break_location:]
118
- reloc_mults = chunk.multipliers[break_location:]
119
-
120
- chunk.tokens = chunk.tokens[:break_location]
121
- chunk.multipliers = chunk.multipliers[:break_location]
122
-
123
- next_chunk()
124
- chunk.tokens = reloc_tokens
125
- chunk.multipliers = reloc_mults
126
-
127
- if len(chunk.tokens) == self.chunk_length:
128
- next_chunk()
129
-
130
- chunk.tokens.append(token)
131
- chunk.multipliers.append(weight)
132
- position += 1
133
-
134
- if len(chunk.tokens) > 0 or len(chunks) == 0:
135
- next_chunk(is_last=True)
136
-
137
- return chunks, token_count
138
-
139
- def process_texts(self, texts):
140
- """
141
- Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum
142
- length, in tokens, of all texts.
143
- """
144
-
145
- token_count = 0
146
-
147
- cache = {}
148
- batch_chunks = []
149
- for line in texts:
150
- if line in cache:
151
- chunks = cache[line]
152
- else:
153
- chunks, current_token_count = self.tokenize_line(line)
154
- token_count = max(current_token_count, token_count)
155
-
156
- cache[line] = chunks
157
-
158
- batch_chunks.append(chunks)
159
-
160
- return batch_chunks, token_count
161
-
162
- def forward(self, texts):
163
- """
164
- Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts.
165
- Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will
166
- be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, and for SD2 it's 1024.
167
- An example shape returned by this function can be: (2, 77, 768).
168
- Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one elemenet
169
- is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
170
- """
171
-
172
- batch_chunks, token_count = self.process_texts(texts)
173
- chunk_count = max([len(x) for x in batch_chunks])
174
-
175
- zs = []
176
- ts = []
177
- for i in range(chunk_count):
178
- batch_chunk = [
179
- chunks[i] if i < len(chunks) else self.empty_chunk()
180
- for chunks in batch_chunks
181
- ]
182
-
183
- tokens = [x.tokens for x in batch_chunk]
184
- multipliers = [x.multipliers for x in batch_chunk]
185
- # self.embeddings.fixes = [x.fixes for x in batch_chunk]
186
-
187
- # for fixes in self.embeddings.fixes:
188
- # for position, embedding in fixes:
189
- # used_embeddings[embedding.name] = embedding
190
-
191
- z = self.process_tokens(tokens, multipliers)
192
- zs.append(z)
193
- ts.append(tokens)
194
-
195
- return np.hstack(ts), torch.hstack(zs)
196
-
197
- def process_tokens(self, remade_batch_tokens, batch_multipliers):
198
- """
199
- sends one single prompt chunk to be encoded by transformers neural network.
200
- remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually
201
- there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens.
202
- Multipliers are used to give more or less weight to the outputs of transformers network. Each multiplier
203
- corresponds to one token.
204
- """
205
- tokens = torch.asarray(remade_batch_tokens).to(self.device())
206
-
207
- # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones.
208
- if self.id_end != self.id_pad:
209
- for batch_pos in range(len(remade_batch_tokens)):
210
- index = remade_batch_tokens[batch_pos].index(self.id_end)
211
- tokens[batch_pos, index + 1 : tokens.shape[1]] = self.id_pad
212
-
213
- z = self.encode_with_transformers(tokens)
214
-
215
- # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
216
- batch_multipliers = torch.asarray(batch_multipliers).to(self.device())
217
- original_mean = z.mean()
218
- z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
219
- new_mean = z.mean()
220
- z = z * (original_mean / new_mean)
221
-
222
- return z
223
-
224
-
225
- class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
226
- def __init__(self, tokenizer, text_encoder):
227
- super().__init__(text_encoder)
228
- self.tokenizer = tokenizer
229
- self.text_encoder = text_encoder
230
-
231
- vocab = self.tokenizer.get_vocab()
232
-
233
- self.comma_token = vocab.get(",</w>", None)
234
-
235
- self.token_mults = {}
236
- tokens_with_parens = [
237
- (k, v)
238
- for k, v in vocab.items()
239
- if "(" in k or ")" in k or "[" in k or "]" in k
240
- ]
241
- for text, ident in tokens_with_parens:
242
- mult = 1.0
243
- for c in text:
244
- if c == "[":
245
- mult /= 1.1
246
- if c == "]":
247
- mult *= 1.1
248
- if c == "(":
249
- mult *= 1.1
250
- if c == ")":
251
- mult /= 1.1
252
-
253
- if mult != 1.0:
254
- self.token_mults[ident] = mult
255
-
256
- self.id_start = self.tokenizer.bos_token_id
257
- self.id_end = self.tokenizer.eos_token_id
258
- self.id_pad = self.id_end
259
-
260
- def tokenize(self, texts):
261
- tokenized = self.tokenizer(
262
- texts, truncation=False, add_special_tokens=False
263
- )["input_ids"]
264
-
265
- return tokenized
266
-
267
- def encode_with_transformers(self, tokens):
268
- CLIP_stop_at_last_layers = 1
269
- tokens = tokens.to(self.text_encoder.device)
270
- outputs = self.text_encoder(tokens, output_hidden_states=True)
271
-
272
- if CLIP_stop_at_last_layers > 1:
273
- z = outputs.hidden_states[-CLIP_stop_at_last_layers]
274
- z = self.text_encoder.text_model.final_layer_norm(z)
275
- else:
276
- z = outputs.last_hidden_state
277
-
278
- return z
279
-
280
-
281
- re_attention = re.compile(
282
- r"""
283
- \\\(|
284
- \\\)|
285
- \\\[|
286
- \\]|
287
- \\\\|
288
- \\|
289
- \(|
290
- \[|
291
- :([+-]?[.\d]+)\)|
292
- \)|
293
- ]|
294
- [^\\()\[\]:]+|
295
- :
296
- """,
297
- re.X,
298
- )
299
-
300
- re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
301
-
302
-
303
- def parse_prompt_attention(text):
304
- """
305
- Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
306
- Accepted tokens are:
307
- (abc) - increases attention to abc by a multiplier of 1.1
308
- (abc:3.12) - increases attention to abc by a multiplier of 3.12
309
- [abc] - decreases attention to abc by a multiplier of 1.1
310
- \( - literal character '('
311
- \[ - literal character '['
312
- \) - literal character ')'
313
- \] - literal character ']'
314
- \\ - literal character '\'
315
- anything else - just text
316
-
317
- >>> parse_prompt_attention('normal text')
318
- [['normal text', 1.0]]
319
- >>> parse_prompt_attention('an (important) word')
320
- [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
321
- >>> parse_prompt_attention('(unbalanced')
322
- [['unbalanced', 1.1]]
323
- >>> parse_prompt_attention('\(literal\]')
324
- [['(literal]', 1.0]]
325
- >>> parse_prompt_attention('(unnecessary)(parens)')
326
- [['unnecessaryparens', 1.1]]
327
- >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
328
- [['a ', 1.0],
329
- ['house', 1.5730000000000004],
330
- [' ', 1.1],
331
- ['on', 1.0],
332
- [' a ', 1.1],
333
- ['hill', 0.55],
334
- [', sun, ', 1.1],
335
- ['sky', 1.4641000000000006],
336
- ['.', 1.1]]
337
- """
338
-
339
- res = []
340
- round_brackets = []
341
- square_brackets = []
342
-
343
- round_bracket_multiplier = 1.1
344
- square_bracket_multiplier = 1 / 1.1
345
-
346
- def multiply_range(start_position, multiplier):
347
- for p in range(start_position, len(res)):
348
- res[p][1] *= multiplier
349
-
350
- for m in re_attention.finditer(text):
351
- text = m.group(0)
352
- weight = m.group(1)
353
-
354
- if text.startswith("\\"):
355
- res.append([text[1:], 1.0])
356
- elif text == "(":
357
- round_brackets.append(len(res))
358
- elif text == "[":
359
- square_brackets.append(len(res))
360
- elif weight is not None and len(round_brackets) > 0:
361
- multiply_range(round_brackets.pop(), float(weight))
362
- elif text == ")" and len(round_brackets) > 0:
363
- multiply_range(round_brackets.pop(), round_bracket_multiplier)
364
- elif text == "]" and len(square_brackets) > 0:
365
- multiply_range(square_brackets.pop(), square_bracket_multiplier)
366
- else:
367
- parts = re.split(re_break, text)
368
- for i, part in enumerate(parts):
369
- if i > 0:
370
- res.append(["BREAK", -1])
371
- res.append([part, 1.0])
372
-
373
- for pos in round_brackets:
374
- multiply_range(pos, round_bracket_multiplier)
375
-
376
- for pos in square_brackets:
377
- multiply_range(pos, square_bracket_multiplier)
378
-
379
- if len(res) == 0:
380
- res = [["", 1.0]]
381
-
382
- # merge runs of identical weights
383
- i = 0
384
- while i + 1 < len(res):
385
- if res[i][1] == res[i + 1][1]:
386
- res[i][0] += res[i + 1][0]
387
- res.pop(i + 1)
388
- else:
389
- i += 1
390
-
391
- return res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_subprocesses.py DELETED
@@ -1,79 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from abc import abstractmethod
4
- from signal import Signals
5
-
6
- from ._resources import AsyncResource
7
- from ._streams import ByteReceiveStream, ByteSendStream
8
-
9
-
10
class Process(AsyncResource):
    """An asynchronous analogue of :class:`subprocess.Popen`."""

    @abstractmethod
    async def wait(self) -> int:
        """
        Block until the process has exited.

        :return: the exit code of the process
        """

    @abstractmethod
    def terminate(self) -> None:
        """
        Terminate the process, gracefully if the platform allows it.

        Sends ``SIGTERM`` on POSIX systems; on Windows this calls
        ``TerminateProcess()``.

        .. seealso:: :meth:`subprocess.Popen.terminate`
        """

    @abstractmethod
    def kill(self) -> None:
        """
        Forcibly kill the process.

        Sends ``SIGKILL`` on POSIX systems; on Windows this calls
        ``TerminateProcess()``.

        .. seealso:: :meth:`subprocess.Popen.kill`
        """

    @abstractmethod
    def send_signal(self, signal: Signals) -> None:
        """
        Deliver the given signal to the subprocess.

        :param signal: the signal number (e.g. :data:`signal.SIGHUP`)

        .. seealso:: :meth:`subprocess.Popen.send_signal`
        """

    @property
    @abstractmethod
    def pid(self) -> int:
        """The process ID (PID) of the process."""

    @property
    @abstractmethod
    def returncode(self) -> int | None:
        """
        The exit code of the process, or ``None`` if the process has not
        yet terminated.
        """

    @property
    @abstractmethod
    def stdin(self) -> ByteSendStream | None:
        """The byte stream connected to the process's standard input."""

    @property
    @abstractmethod
    def stdout(self) -> ByteReceiveStream | None:
        """The byte stream connected to the process's standard output."""

    @property
    @abstractmethod
    def stderr(self) -> ByteReceiveStream | None:
        """The byte stream connected to the process's standard error output."""