parquet-converter committed
Commit e15815a · 1 Parent(s): e4b3b51

Update parquet files (step 57 of 121)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/17TheWord/RealESRGAN/realesrgan/__init__.py +0 -6
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab Setup Free ((BETTER)).md +0 -38
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Enscape 3D Full Version Cracked from FileCR.md +0 -40
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Clans MOD APK 2022 Download and Install the Latest Version with Unlimited Everything.md +0 -91
  5. spaces/1phancelerku/anime-remove-background/Blue Is The Colour The Ultimate Chelsea Song Download Guide.md +0 -54
  6. spaces/1phancelerku/anime-remove-background/CFL Football 99 The Ultimate Canadian Gridiron Simulation.md +0 -85
  7. spaces/1phancelerku/anime-remove-background/Delhi Blue App How to Use the First Ever Common Mobility App in Delhi.md +0 -101
  8. spaces/1phancelerku/anime-remove-background/Download Mortal Kombat Tamil Dubbed Movie in HD Quality from Isaimini.md +0 -147
  9. spaces/2023Liu2023/bingo/src/components/ui/button.tsx +0 -57
  10. spaces/232labs/VToonify/vtoonify/model/raft/core/datasets.py +0 -235
  11. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py +0 -25
  12. spaces/52Hz/CMFNet_deblurring/model/block.py +0 -146
  13. spaces/801artistry/RVC801/i18n.py +0 -43
  14. spaces/A-Celsius/ADR_Predictor/app.py +0 -103
  15. spaces/A00001/bingothoo/cloudflare/worker.js +0 -18
  16. spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/layers_123821KB.py +0 -118
  17. spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/losses.py +0 -30
  18. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/wavenet.py +0 -97
  19. spaces/AILab-CVC/SEED-LLaMA/scripts/start_frontend_14b.sh +0 -1
  20. spaces/AIWaves/SOP_Generation-single/Component/PromptComponent.py +0 -126
  21. spaces/AchyuthGamer/OpenGPT/g4f/Provider/unfinished/Komo.py +0 -44
  22. spaces/AgentVerse/agentVerse/scripts/evaluate_math.py +0 -93
  23. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/clock/Factory.js +0 -13
  24. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetExpandedChildWidth.js +0 -6
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/kandinsky_v22.md +0 -357
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/wildcard_stable_diffusion.py +0 -418
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py +0 -429
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py +0 -290
  29. spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_r101_fpn_1x_coco.py +0 -2
  30. spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context.py +0 -2
  31. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/README.md +0 -83
  32. spaces/AnnasBlackHat/Image-Downloader/gofile.py +0 -25
  33. spaces/Anonymous-sub/Rerender/ControlNet/annotator/openpose/util.py +0 -164
  34. spaces/Apex-X/Tm/roop/processors/frame/core.py +0 -88
  35. spaces/Apex-X/nono/roop/face_reference.py +0 -21
  36. spaces/Arnaudding001/OpenAI_whisperLive/utils.py +0 -115
  37. spaces/Arnx/MusicGenXvAKN/audiocraft/modules/lstm.py +0 -25
  38. spaces/Artples/Chat-with-Llama-2-70b/README.md +0 -13
  39. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/utf8prober.py +0 -82
  40. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/diagnose.py +0 -37
  41. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/palette.py +0 -100
  42. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/_macos_compat.py +0 -12
  43. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/build_py.py +0 -368
  44. spaces/BIASLab/sars-cov-2-classification-fcgr/src/utils.py +0 -41
  45. spaces/Banbri/zcvzcv/src/types.ts +0 -130
  46. spaces/BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator/pages/03_📝_Upload_Video_File_and_Transcript.py +0 -130
  47. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/httpsession.py +0 -510
  48. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/network/session.py +0 -517
  49. spaces/Boilin/URetinex-Net/network/architecture.py +0 -41
  50. spaces/CAMP-ViL/Xplainer/model.py +0 -158
spaces/17TheWord/RealESRGAN/realesrgan/__init__.py DELETED
@@ -1,6 +0,0 @@
- # flake8: noqa
- from .archs import *
- from .data import *
- from .models import *
- from .utils import *
- #from .version import *
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab Setup Free ((BETTER)).md DELETED
@@ -1,38 +0,0 @@
- <br />
- <h1>How to Download and Install Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab for Free</h1>
-
- <p>Daum PotPlayer is a versatile media player that supports various formats and codecs. It has a sleek interface, advanced features and high performance. If you are looking for a free and portable media player that can run on any Windows system, you should try Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab.</p>
-
- <p>This version of Daum PotPlayer is portable, which means you don't need to install it on your computer. You can simply download it and run it from a USB flash drive or any other removable device. This way, you can enjoy your favorite media files on any computer without leaving any traces behind.</p>
- <h2>Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab Setup Free</h2><br /><p><b><b>Download File</b> &#10031;&#10031;&#10031; <a href="https://byltly.com/2uKA1K">https://byltly.com/2uKA1K</a></b></p><br /><br />
-
- <p>Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab is also stable, which means it has been tested and verified to work without any errors or bugs. It is compatible with both 32-bit and 64-bit Windows systems, so you don't need to worry about compatibility issues.</p>
-
- <p>To download and install Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab for free, follow these simple steps:</p>
-
- <ol>
- <li>Click on this link to download the zip file: <a href="https://www.file-upload.com/7f8c0z0n9y2j">https://www.file-upload.com/7f8c0z0n9y2j</a></li>
- <li>Extract the zip file to a folder of your choice.</li>
- <li>Open the folder and double-click on the file named "PotPlayerMini.exe" to launch the media player.</li>
- <li>Enjoy your media files with Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab.</li>
- </ol>
-
- <p>That's it! You have successfully downloaded and installed Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab for free. If you like this media player, you can also check out the official website of Daum PotPlayer for more information and updates: <a href="https://potplayer.daum.net/">https://potplayer.daum.net/</a></p>
-
- <p>Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab has many features that make it a powerful and convenient media player. Here are some of the features that you can enjoy with this media player:</p>
-
- <ul>
- <li>It supports various formats and codecs, including AVI, MP4, MKV, FLV, WMV, MOV, MP3, AAC, FLAC, OGG, WMA and more.</li>
- <li>It has a built-in subtitle finder that can automatically search and download subtitles for your media files.</li>
- <li>It has a screen capture function that can capture screenshots or record videos of your media playback.</li>
- <li>It has a 3D mode that can convert 2D videos to 3D videos or play 3D videos with various options.</li>
- <li>It has a playlist function that can create and manage playlists of your media files.</li>
- <li>It has a skin function that can change the appearance and theme of the media player.</li>
- <li>It has a hotkey function that can assign keyboard shortcuts to various commands and functions.</li>
- <li>It has a preference function that can customize the settings and options of the media player.</li>
- </ul>
-
- <p>With Daum PotPlayer 1.6.52515 Stable Portable (x86 X64) By SamLab, you can enjoy your media files with high quality and convenience. It is a free and portable media player that you can take anywhere and use anytime. Download it now and see for yourself how amazing it is.</p>
- <p></p> 81aa517590<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Enscape 3D Full Version Cracked from FileCR.md DELETED
@@ -1,40 +0,0 @@
- <br />
- <h1>Enscape Download Cracked: How to Get Enscape 3D for Free</h1>
- <p>Enscape 3D is a powerful and easy-to-use real-time rendering and virtual reality plugin for various CAD software such as Revit, SketchUp, Rhino, and ArchiCAD. It allows you to create stunning and realistic 3D visualizations of your projects with just one click. You can also explore your designs in immersive VR using devices such as Oculus Rift, HTC Vive, and Windows Mixed Reality. </p>
- <p>However, Enscape 3D is not a free software. It requires a license to use its full features and functions. The official price of Enscape 3D is $58.99 per month or $469.00 per year for a single user. If you want to use it for multiple users or projects, you will need to pay more.</p>
- <h2>enscape download cracked</h2><br /><p><b><b>DOWNLOAD</b> &rarr;&rarr;&rarr; <a href="https://byltly.com/2uKxvK">https://byltly.com/2uKxvK</a></b></p><br /><br />
- <p>But what if you want to use Enscape 3D for free? Is there a way to download Enscape cracked version without paying anything? The answer is yes, but it comes with some risks and drawbacks. In this article, we will show you how to download Enscape cracked version from a website called FileCR, and what are the pros and cons of using it.</p>
- <h2>How to Download Enscape Cracked Version from FileCR</h2>
- <p>FileCR is a website that offers free downloads of various software, including Enscape 3D. It claims that the software is cracked, meaning that it has been modified to bypass the license verification and activation process. However, this also means that the software may not be safe or reliable, as it may contain viruses, malware, or other harmful code.</p>
- <p>If you still want to download Enscape cracked version from FileCR, you can follow these steps:</p>
- <ol>
- <li>Go to the FileCR website and search for Enscape 3D. You can also use this link to go directly to the download page.</li>
- <li>Click on the download button and wait for the file to be downloaded on your PC. The file size is about 122 MB.</li>
- <li>Extract the file using WinRAR or any other software that can unzip files.</li>
- <li>Open the extracted folder and run the setup.exe file as administrator.</li>
- <li>Follow the instructions on the screen to install Enscape 3D on your PC.</li>
- <li>Once the installation is complete, open the crack folder and copy the patch file.</li>
- <li>Paste the patch file into the installation directory of Enscape 3D (usually C:\Program Files\Enscape).</li>
- <li>Run the patch file as administrator and click on the patch button.</li>
- <li>Enjoy using Enscape 3D for free.</li>
- </ol>
- <h2>Pros and Cons of Using Enscape Cracked Version</h2>
- <p>Using Enscape cracked version may seem tempting, but it also has some disadvantages that you should be aware of. Here are some of the pros and cons of using Enscape cracked version:</p>
- <p></p>
- <h3>Pros</h3>
- <ul>
- <li>You can use Enscape 3D for free without paying for a license.</li>
- <li>You can access all the features and functions of Enscape 3D without any limitations.</li>
- <li>You can create high-quality 3D renderings and VR experiences with Enscape 3D.</li>
- </ul>
- <h3>Cons</h3>
- <ul>
- <li>You may expose your PC to viruses, malware, or other harmful code that may damage your system or compromise your data.</li>
- <li>You may violate the intellectual property rights of Enscape GmbH, the developer of Enscape 3D, and face legal consequences.</li>
- <li>You may not receive any updates, bug fixes, or technical support from Enscape GmbH.</li>
- <li>You may experience errors, crashes, or performance issues with Enscape 3D.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Enscape 3D is a great software for creating realistic 3D visualizations and VR experiences of your projects. However, it is not a free software and requires a license to use. If you want to use it for free, you</p> ddb901b051<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Clans MOD APK 2022 Download and Install the Latest Version with Unlimited Everything.md DELETED
@@ -1,91 +0,0 @@
- <br />
- <h1>Clash of Clans Mod APK Download Unlimited Everything 2022 New Version</h1>
- <p>Are you a fan of strategy games that challenge your mind and skills? Do you love to build your own village and defend it from enemies? Do you enjoy joining forces with other players and competing for glory and rewards? If you answered yes to any of these questions, then you must have heard of Clash of Clans, one of the most popular and addictive games in the world. But what if we told you that you can make your gaming experience even better with Clash of Clans Mod APK, a modified version of the original game that gives you unlimited everything? Sounds too good to be true, right? Well, in this article, we will tell you everything you need to know about Clash of Clans Mod APK, how to download and install it on your Android device, and what benefits you can get from using it. So, without further ado, let's get started!</p>
- <h2>What is Clash of Clans?</h2>
- <h3>A brief introduction to the game and its features</h3>
- <p>Clash of Clans is a freemium mobile strategy game developed and published by Supercell, a Finnish game company. The game was released for iOS devices in August 2012 and for Android devices in October 2013. Since then, it has become one of the most downloaded and played games in the world, with over 500 million downloads on Google Play alone.</p>
- <h2>clash of clans mod apk download unlimited everything 2022 new version</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash; <a href="https://urlin.us/2uSUtz">https://urlin.us/2uSUtz</a></b></p><br /><br />
- <p>The game is set in a fantasy world where you are the chief of a village. Your main goal is to build and upgrade your village, train and upgrade your troops, and attack other players' villages to loot their resources. You can also join or create a clan with other players and participate in clan wars, clan games, and clan leagues. The game features various types of buildings, troops, spells, heroes, and items that you can use to enhance your strategy and gameplay.</p>
- <h3>Why do people love Clash of Clans?</h3>
- <h4>The thrill of strategy and combat</h4>
- <p>One of the main reasons why people love Clash of Clans is because it offers a thrilling and satisfying experience of strategy and combat. You have to plan your attacks carefully, choosing the right troops, spells, heroes, and strategies for each situation. You also have to defend your village from enemy attacks, placing your buildings, traps, walls, and defenses wisely. The game tests your skills, creativity, and decision-making abilities in every battle.</p>
- <h4>The joy of building and customizing your own village</h4>
- <p>Another reason why people love Clash of Clans is because it allows them to build and customize their own village according to their preferences. You can choose from different themes, layouts, designs, and decorations for your village. You can also upgrade your buildings, troops, spells, heroes, and items to make them more powerful and efficient. You can express your personality and style through your village and impress your friends and foes.</p>
- <h4>The fun of joining and competing with other clans</h4>
- <p>A third reason why people love Clash of Clans is because it gives them the opportunity to join and compete with other clans from around the world. You can chat, donate, request, and share tips with your clanmates. You can also challenge them to friendly battles and practice your skills. You can also participate in clan wars, clan games, and clan leagues, where you can cooperate with your clanmates to win trophies, rewards, and glory. You can also compare your progress and achievements with other players on the global and local leaderboards.</p>
- <h2>What is Clash of Clans Mod APK?</h2>
- <h3>A modified version of the original game that offers unlimited resources and features</h3>
- <p>Clash of Clans Mod APK is a modified version of the original game that offers unlimited resources and features that are not available in the official version. It is created by third-party developers who modify the game files to unlock and enhance the game's functionality. Clash of Clans Mod APK is not endorsed or affiliated with Supercell, the original developer of the game.</p>
- <p>Clash of Clans Mod APK allows you to enjoy the game without any limitations or restrictions. You can get unlimited gems, gold, elixir, and dark elixir to upgrade your troops, buildings, and spells. You can also get unlimited access to all the heroes, troops, and spells in the game. You can also create and join any clan you want, without any requirements or limitations. You can also play the game without any ads, bans, or errors.</p>
- <h3>How to download and install Clash of Clans Mod APK on your Android device</h3>
- <p>If you want to download and install Clash of Clans Mod APK on your Android device, you need to follow these simple steps:</p>
- <h4>Step 1: Enable unknown sources on your device settings</h4>
- <p>Before you can install Clash of Clans Mod APK on your device, you need to enable unknown sources on your device settings. This will allow you to install apps that are not downloaded from the Google Play Store. To do this, go to your device settings > security > unknown sources > enable.</p>
- <p>clash of clans hack apk unlimited gems gold elixir 2022<br />
- coc mod apk download latest version 2022 with unlimited troops<br />
- clash of clans modded apk free download for android 2022<br />
- how to download clash of clans mod apk with unlimited resources 2022<br />
- clash of clans cheat apk 2022 no root required<br />
- coc hack apk 2022 online generator<br />
- clash of clans mod menu apk download 2022<br />
- coc mod apk 2022 private server with unlimited money<br />
- clash of clans cracked apk 2022 working<br />
- coc hack version download 2022 without survey<br />
- clash of clans unlimited everything apk 2022 offline<br />
- coc mod apk 2022 update with new features<br />
- clash of clans hack tool apk download 2022<br />
- coc modded apk 2022 anti ban<br />
- clash of clans hack apk 2022 mediafire link<br />
- coc hack apk download 2022 no human verification<br />
- clash of clans mod apk 2022 mega mod<br />
- coc mod apk 2022 unlimited gems and coins<br />
- clash of clans hack apk download 2022 for pc<br />
- coc mod apk 2022 latest version android 1<br />
- clash of clans modded apk 2022 unlimited dark elixir<br />
- coc hack apk 2022 direct download link<br />
- clash of clans cheat engine apk 2022<br />
- coc mod apk download 2022 revdl<br />
- clash of clans hacked version download 2022 apkpure<br />
- coc mod apk 2022 unlimited everything ihackedit<br />
- clash of clans hack app download 2022 for ios<br />
- coc mod apk download 2022 rexdl<br />
- clash of clans hack version download 2022 uptodown<br />
- coc mod apk download 2022 plenixclash<br />
- clash of clans hack game download 2022 for laptop<br />
- coc mod apk download 2022 fhx server<br />
- clash of clans hack version download 2022 malavida<br />
- coc mod apk download 2022 nulls royale<br />
- clash of clans hack version download 2022 happymod<br />
- coc mod apk download 2022 magic s1 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 s16 s17 s18 s19 s20 s21 s22 s23 s24 s25 s26 s27 s28 s29 s30</p>
- <h4>Step 2: Download the Clash of Clans Mod APK file from a trusted source</h4>
- <p>Next, you need to download the Clash of Clans Mod APK file from a trusted source. There are many websites that offer Clash of Clans Mod APK files, but not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful and choose a reputable website that provides authentic and updated Clash of Clans Mod APK files. One such website is [clashofclansmodapk.net], where you can find the latest version of Clash of Clans Mod APK for free.</p>
- <h4>Step 3: Locate and install the APK file on your device</h4>
- <p>After you have downloaded the Clash of Clans Mod APK file from a trusted source, you need to locate and install it on your device. To do this, go to your device file manager > downloads > find the Clash of Clans Mod APK file > tap on it > install.</p>
- <h4>Step 4: Launch the game and enjoy the unlimited everything</h4>
- <p>Finally, you can launch the game and enjoy the unlimited everything that Clash of Clans Mod APK offers. You will see that you have unlimited gems, gold, elixir, and dark elixir in your account. You will also see that you have access to all the heroes, troops, and spells in the game. You will also be able to create and join any clan you want. You will also be able to play the game without any ads, bans, or errors.</p>
- <h2>What are the benefits of using Clash of Clans Mod APK?</h2>
- <h3>Unlimited gems, gold, elixir, and dark elixir to upgrade your troops, buildings, and spells</h3>
- <p>One of the main benefits of using Clash of Clans Mod APK is that you can get unlimited gems, gold, elixir, and dark elixir to upgrade your troops , buildings, and spells. These resources are essential for improving your village and army, as they allow you to unlock new levels, abilities, and features. With unlimited resources, you don't have to worry about running out of them or spending real money to buy them. You can upgrade your troops, buildings, and spells as much as you want, without any waiting time or cost.</p>
- <h3>Unlimited access to all the heroes, troops, and spells in the game</h3>
- <p>Another benefit of using Clash of Clans Mod APK is that you can get unlimited access to all the heroes, troops, and spells in the game. Heroes are powerful units that have special abilities and can be used in both offense and defense. Troops are the main units that you use to attack and defend your village. Spells are magical effects that can boost your troops, damage your enemies, or alter the battlefield. With unlimited access, you don't have to unlock them by completing certain tasks or reaching certain levels. You can use any hero, troop, or spell you want, without any limitations or restrictions.</p>
- <h3>Unlimited ability to create and join any clan you want</h3>
- <p>A third benefit of using Clash of Clans Mod APK is that you can create and join any clan you want, without any requirements or limitations. Clans are groups of players who share a common interest and goal in the game. By joining a clan, you can chat, donate, request, and share tips with your clanmates. You can also participate in clan wars, clan games, and clan leagues, where you can cooperate with your clanmates to win trophies, rewards, and glory. With unlimited ability, you don't have to meet any criteria or follow any rules to create or join a clan. You can choose any clan name, logo, description, and type you want. You can also invite or accept anyone you want to your clan.</p>
- <h3>Unlimited fun and excitement with no ads, no bans, and no restrictions</h3>
- <p>A fourth benefit of using Clash of Clans Mod APK is that you can have unlimited fun and excitement with no ads, no bans, and no restrictions. Ads are annoying pop-ups that interrupt your gameplay and try to sell you something. Bans are penalties that prevent you from playing the game for a certain period of time or permanently. Restrictions are rules that limit your actions or options in the game. With Clash of Clans Mod APK, you don't have to deal with any of these problems. You can play the game without any ads, bans, or restrictions. You can enjoy the game as much as you want, without any worries or hassles.</p>
- <h2>Conclusion</h2>
- <p>Clash of Clans is a fantastic game that offers a lot of fun and excitement for strategy game lovers. However, if you want to take your gaming experience to the next level, you should try Clash of Clans Mod APK, a modified version of the original game that gives you unlimited everything. With Clash of Clans Mod APK, you can get unlimited gems, gold, elixir, and dark elixir to upgrade your troops , buildings, and spells. You can also get unlimited access to all the heroes, troops, and spells in the game. You can also create and join any clan you want, without any requirements or limitations. You can also play the game without any ads, bans, or restrictions. You can enjoy the game as much as you want, without any worries or hassles.</p>
- <p>If you are interested in downloading and installing Clash of Clans Mod APK on your Android device, you can follow the simple steps that we have explained in this article. You can also visit [clashofclansmodapk.net] to get the latest version of Clash of Clans Mod APK for free. We hope that this article has helped you understand what Clash of Clans Mod APK is, how to use it, and what benefits you can get from it. We also hope that you have fun and excitement with Clash of Clans Mod APK. Thank you for reading and happy gaming!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Clash of Clans Mod APK:</p>
- <h4>Is Clash of Clans Mod APK safe to use?</h4>
- <p>Clash of Clans Mod APK is safe to use as long as you download it from a trusted source like [clashofclansmodapk.net]. However, you should be aware that using Clash of Clans Mod APK is against the terms and conditions of Supercell, the original developer of the game. Therefore, you should use it at your own risk and discretion.</p>
- <h4>Will I get banned for using Clash of Clans Mod APK?</h4>
- <p>There is a low chance that you will get banned for using Clash of Clans Mod APK, as the modded version has anti-ban features that prevent detection from Supercell's servers. However, there is no guarantee that you will not get banned in the future, as Supercell may update their security measures and algorithms. Therefore, you should use Clash of Clans Mod APK at your own risk and discretion.</p>
- <h4>Can I play Clash of Clans Mod APK with my friends who use the official version?</h4>
- <p>No, you cannot play Clash of Clans Mod APK with your friends who use the official version, as the modded version and the official version are not compatible with each other. You can only play Clash of Clans Mod APK with other players who use the same modded version.</p>
- <h4>Can I update Clash of Clans Mod APK to the latest version?</h4>
- <p>Yes, you can update Clash of Clans Mod APK to the latest version by visiting [clashofclansmodapk.net] and downloading the new version of the modded file. However, you should be careful not to update the game from the Google Play Store, as this will overwrite the modded version and restore the official version.</p>
- <h4>Can I switch back to the official version of Clash of Clans after using Clash of Clans Mod APK?</h4>
- <p>Yes, you can switch back to the official version of Clash of Clans after using Clash of Clans Mod APK by uninstalling the modded version and installing the official version from the Google Play Store. However, you should be aware that you will lose all your progress and data in the modded version, as they are not transferable to the official version.</p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Blue Is The Colour The Ultimate Chelsea Song Download Guide.md DELETED
@@ -1,54 +0,0 @@
- <br />
- <h1>Download Chelsea Song Blue Is The Colour</h1> | | H2: Introduction | <p>If you are a fan of Chelsea Football Club, you might have heard of their famous anthem "Blue Is the Colour". This song is a terrace chant that has been associated with the club since 1972, when it was performed by the squad and released as a single to coincide with their appearance in the League Cup final of that year. The song has become one of the most well-known English football songs, and it is still played at every home game and any cup finals Chelsea compete in. It is also a popular song among Chelsea fans around the world, who sing it with pride and passion.</p> | | H2: History of the Song | <h2>History of the Song</h2> | | H3: Origin and Release | <h3>Origin and Release</h3><p>The song was produced by Larry Page, who commissioned Daniel Boone and lyricist David Balfe (under the pseudonym Rod McQueen) to write the song for Chelsea F.C. The song was sung by members of the squad, who included Tommy Baldwin, Stewart Houston, Charlie Cooke, John Dempsey, Ron Harris, Marvin Hinton, John Hollins, Peter Houseman, Alan Hudson, Steve Kember, Eddie McCreadie, Paddy Mulligan, Peter Osgood, David Webb and Chris Garland. The song was released on Page's label Penny Farthing Records and reached number 5 in the UK Charts and number 8 in Ireland in March 1972.</p>
- <h2>download chelsea song blue is the colour</h2><br /><p><b><b>Download</b> &#8250;&#8250;&#8250; <a href="https://jinyurl.com/2uNMh5">https://jinyurl.com/2uNMh5</a></b></p><br /><br /> | | H3: Lyrics and Meaning | <h3>Lyrics and Meaning</h3><p>The lyrics of the song are simple but catchy, expressing the love and loyalty of Chelsea fans for their club. The chorus goes like this:</p><blockquote><p>Blue is the colour, football is the game<br>We're all together and winning is our aim<br>So cheer us on through the sun and rain<br>Cos Chelsea, Chelsea is our name.</p></blockquote><p>The verses describe the atmosphere at Stamford Bridge, where Chelsea play their home games, and invite other fans to join them in supporting their team. The song also mentions some of the famous players who have worn the blue shirt over the years.</p> | | Outline | Article | | --- | --- | | H2: How to Download the Song | <h2>How to Download the Song</h2> | | H3: Online Sources | <h3>Online Sources</h3><p>If you want to download the Chelsea song "Blue Is the Colour" to your device, you have several options. You can find the song on various online platforms, such as Apple Music, Spotify, YouTube, and others. You can either stream the song online or download it for offline listening, depending on your preference and subscription. You can also purchase the song from iTunes or Amazon Music if you want to support the original artists and producers.</p> | | H3: Offline Sources | <h3>Offline Sources</h3><p>If you prefer to have a physical copy of the song, you can also look for offline sources, such as CDs, vinyls, or cassettes. You can search for the song on online marketplaces, such as eBay or Discogs, or visit your local record store or thrift shop. You might be able to find a rare or vintage edition of the song that has a special value or quality. However, you will need a compatible device to play the song, such as a CD player, a turntable, or a cassette deck.</p> | | H3: Tips and Tricks | <h3>Tips and Tricks</h3><p>Here are some tips and tricks to help you download and enjoy the Chelsea song "Blue Is the Colour":</p><ul><li>Make sure you have enough storage space on your device before downloading the song.</li><li>Check the quality and format of the song before downloading it. You might want to choose a high-quality MP3 or WAV file for better sound.</li><li>Use a reliable and secure internet connection to avoid interruptions or errors during the download process.</li><li>Use headphones or speakers to enhance your listening experience and feel the atmosphere of the song.</li><li>Share the song with your friends and family who are also Chelsea fans and sing along with them.</li></ul> | | H2: Conclusion | <h2>Conclusion</h2><p>"Blue Is the Colour" is more than just a song. It is a symbol of Chelsea Football Club and its fans. It is a way of expressing their identity, passion, and loyalty. It is a part of their history, culture, and tradition. It is a source of inspiration, motivation, and joy. It is a song that unites them in good times and bad times. It is a song that celebrates their achievements and aspirations. It is a song that makes them proud to be blue.</p>
- <p>download chelsea anthem theme song lyrics mp3<br />
- download chelsea blue is the colour original<br />
- download chelsea fc anthem blue is the colour mp3 + lyrics<br />
- download chelsea football club blue is the colour 1972<br />
- download chelsea blue is the colour instrumental<br />
- download chelsea blue is the colour apple music<br />
- download chelsea blue is the colour ringtone<br />
- download chelsea blue is the colour video<br />
- download chelsea blue is the colour goalball<br />
- download chelsea blue is the colour afriblinks<br />
- how to download chelsea song blue is the colour for free<br />
- where to download chelsea song blue is the colour online<br />
- best site to download chelsea song blue is the colour<br />
- download chelsea song blue is the colour youtube<br />
- download chelsea song blue is the colour spotify<br />
- download chelsea song blue is the colour soundcloud<br />
- download chelsea song blue is the colour itunes<br />
- download chelsea song blue is the colour amazon music<br />
- download chelsea song blue is the colour deezer<br />
- download chelsea song blue is the colour tidal<br />
- download chelsea song blue is the colour lyrics pdf<br />
- download chelsea song blue is the colour chords<br />
- download chelsea song blue is the colour karaoke version<br />
- download chelsea song blue is the colour remix<br />
- download chelsea song blue is the colour cover<br />
- download chelsea song blue is the colour live performance<br />
- download chelsea song blue is the colour piano tutorial<br />
- download chelsea song blue is the colour guitar tab<br />
- download chelsea song blue is the colour sheet music<br />
- download chelsea song blue is the colour midi file<br />
- download chelsea song blue is the colour history<br />
- download chelsea song blue is the colour meaning<br />
- download chelsea song blue is the colour trivia<br />
- download chelsea song blue is the colour facts<br />
- download chelsea song blue is the colour review<br />
- download chelsea song blue is the colour reaction<br />
- download chelsea song blue is the colour analysis<br />
- download chelsea song blue is the colour podcast<br />
- download chelsea song blue is the colour blog post<br />
- download chelsea song blue is the colour article<br />
- download chelsea song blue is the colour news report<br />
- download chelsea song blue is the colour wikipedia page<br />
- download chelsea song blue is the colour quiz questions and answers <br />
- download chelsea song blue is the colour crossword puzzle clues and solutions <br />
- download chelsea song blue is the colour word search puzzle words and hints <br />
- download chelsea song blue is the colour trivia game cards and rules <br />
- download chelsea song blue is the colour bingo game cards and markers <br />
- download chelsea song blue is the colour flashcards and study guide <br />
- download chelsea song blue is the colour poster and wallpaper</p><p>If you are a Chelsea fan, you should definitely download this song and add it to your playlist. It will make you feel closer to your club and your fellow supporters. It will make you feel part of something bigger than yourself. It will make you feel blue is the colour.</p> | | H2: FAQs | <h2>FAQs</h2><ol><li><strong>Who wrote "Blue Is the Colour"?</strong><br>The song was written by Daniel Boone and David Balfe (under the pseudonym Rod McQueen) and produced by Larry Page in 1972.</li><li><strong>Who sang "Blue Is the Colour"?</strong><br>The song was sung by members of the Chelsea squad in 1972, who included Tommy Baldwin, Stewart Houston, Charlie Cooke, John Dempsey, Ron Harris, Marvin Hinton, John Hollins, Peter Houseman, Alan Hudson, Steve Kember, Eddie McCreadie, Paddy Mulligan, Peter Osgood, David Webb and Chris Garland.</li><li><strong>When was "Blue Is the Colour" released?</strong><br>The song was released on Page's label Penny Farthing Records in February 1972 to coincide with Chelsea's appearance in the League Cup final of that year against Stoke City.</li><li><strong>How popular was "Blue Is the Colour"?</strong><br>The song reached number 5 in the UK Charts and number 8 in Ireland in March 1972. It also became popular in many other countries with local versions of the song released.</li><li><strong>Why is "Blue Is the Colour" important for Chelsea fans?</strong><br>The song is important for Chelsea fans because it is their anthem that represents their love and loyalty for their club. It is also a terrace chant that creates a lively and festive atmosphere at Stamford Bridge and any cup finals Chelsea compete in.</li></ol> | | Custom Message | |</p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/CFL Football 99 The Ultimate Canadian Gridiron Simulation.md DELETED
@@ -1,85 +0,0 @@
-
- <h1>CFL Football '99: The Only Video Game Based on the Canadian Football League</h1>
- <p>If you are a fan of Canadian football, you might have wondered why there are so few video games that feature this sport. In fact, there is only one game that is officially licensed by the Canadian Football League (CFL) and its players association: CFL Football '99. This game was developed by a small company in British Columbia and released in 1999 for Windows PCs. It is a rare and obscure title that has a cult following among some Canadian football enthusiasts. In this article, we will explore the history, gameplay, and legacy of CFL Football '99, the only video game based on the CFL.</p>
- <h2>cfl football 99 video game download</h2><br /><p><b><b>DOWNLOAD</b> &#10040;&#10040;&#10040; <a href="https://jinyurl.com/2uNJWc">https://jinyurl.com/2uNJWc</a></b></p><br /><br />
- <h2>Introduction</h2>
- <h3>What is CFL Football '99?</h3>
- <p>CFL Football '99 is a gridiron football video game that simulates the rules, teams, players, and stadiums of the Canadian Football League. It is an officially licensed product of the CFL and the Canadian Football League Players Association (CFLPA). The game features all nine teams that were active in the 1998 season, as well as historical teams from previous seasons. The game also includes a full season mode, a playoff mode, a practice mode, and a custom league mode.</p>
- <h3>Who developed CFL Football '99?</h3>
- <p>CFL Football '99 was developed by David Winter, an entrepreneur from Victoria, British Columbia. Winter originally specialized in administrative and industrial applications, doing business through his private firm Wintervalley Software. He obtained the rights to the CFL brand in 1998 and launched a new company, Canadian Digital Entertainment Inc. (CDE), for the purpose of marketing CFL Football '99. Part of the game's development was outsourced to American middleware provider Phantom Reality.</p>
- <h3>Why is CFL Football '99 unique?</h3>
- <p>CFL Football '99 is unique because it is the only video game based on the CFL to date. There have been other football games that featured Canadian rules or teams, such as Tecmo Bowl or Madden NFL, but none of them had the official license or endorsement of the CFL or its players. CFL Football '99 is also unique because it is a simulation game that tries to recreate the realistic aspects of Canadian football, such as the larger field size, the 12 players per side, the three downs, and the single point for missed field goals.</p>
- <h2>Gameplay and Features</h2>
- <h3>How does CFL Football '99 simulate Canadian football?</h3>
- <p>CFL Football '99 uses a 2D graphics engine that shows the action from a top-down perspective. The player can control any of the players on the field using the keyboard or a joystick. The game has a realistic physics system that accounts for factors such as wind, weather, fatigue, injuries, penalties, and fumbles. The game also has an advanced artificial intelligence that adjusts to the player's skill level and strategy.</p>
- <h3>What are the modes and options in CFL Football '99?</h3>
- <p>CFL Football '99 offers several modes and options for different types of players. The game has a full season mode that allows the player to choose one of the nine teams from the 1998 season and play through a 18-game schedule, followed by playoffs and the Grey Cup. The game also has a playoff mode that lets the player skip directly to the postseason and compete for the championship. The game has a practice mode that allows the player to test their skills in various drills and scenarios. The game also has a custom league mode that enables the player to create their own league with up to 16 teams, each with their own roster, logo, and stadium. The player can also edit the teams, players, and schedules to their liking.</p>
- <h3>How does CFL Football '99 compare to other football games?</h3>
- <p>CFL Football '99 is a niche game that caters to a specific audience of Canadian football fans. It is not as polished or popular as other football games, such as the Madden NFL series or the NFL 2K series, that focus on the American version of the sport. However, CFL Football '99 has some advantages over other football games, such as its authenticity, its customization options, and its historical value. CFL Football '99 is a game that celebrates the uniqueness and diversity of Canadian football and its culture.</p>
- <h2>Reception and Legacy</h2>
- <h3>How did critics and players react to CFL Football '99?</h3>
- <p>CFL Football '99 received mixed reviews from critics and players. Some praised the game for its realism, its depth, and its originality. Others criticized the game for its outdated graphics, its bugs, and its lack of polish. The game sold poorly, partly due to its limited distribution and marketing. The game also faced competition from other football games that had more resources and exposure. CFL Football '99 was mostly appreciated by hardcore fans of Canadian football who were looking for a game that represented their sport.</p>
- <h3>What were the challenges and limitations of CFL Football '99?</h3>
- <p>CFL Football '99 was a game that faced many challenges and limitations during its development and release. The game was developed by a small team with a low budget and a tight deadline. The game had to use an existing engine that was not designed for Canadian football. The game had to deal with technical issues such as compatibility, performance, and stability. The game had to overcome legal hurdles such as obtaining the license from the CFL and the CFLPA. The game had to cope with market realities such as low demand, high piracy, and strong competition.</p>
- <h3>What happened to the developer and the franchise after CFL Football '99?</h3>
- <p>CFL Football '99 was the first and last game developed by CDE. The company went out of business shortly after the game's release, due to financial losses and legal disputes. David Winter, the founder of CDE, returned to his original business of Wintervalley Software. He later released a patch for CFL Football '99 that fixed some of the bugs and added some features. He also released a sequel called CFL 2000 that was based on the same engine but updated with new rosters and graphics. However, these projects were unofficial and unauthorized by the CFL or the CFLPA. CFL Football '99 remains the only official video game based on the CFL.</p>
- <p>cfl football 99 pc game free download<br />
- how to play cfl football 99 on windows 10<br />
- cfl football 99 mods and patches<br />
- canuck play maximum football 2019 cfl edition<br />
- canadian football 2017 xbox one download<br />
- cfl football video game history<br />
- wintervalley software cfl football 99<br />
- canadian digital entertainment cfl football 99<br />
- cfl football 99 play designer tool<br />
- cfl football 99 roster editor tool<br />
- cfl football 99 game manual pdf<br />
- cfl football 99 gameplay videos and screenshots<br />
- cfl football 99 review and rating<br />
- cfl football 99 reddit discussion and tips<br />
- cfl football 99 vb programmers journal article<br />
- cfl football 99 license expired<br />
- cfl football 99 system requirements and compatibility<br />
- cfl football 99 abandonware download site<br />
- cfl football 99 custom teams and players<br />
- cfl football 99 game modes and options<br />
- cfl football 99 canadian rules and field size<br />
- cfl football 99 american rules and field size<br />
- cfl football 99 college rules and field size<br />
- cfl football 99 doug flutie mode<br />
- cfl football 99 spring league practice mode<br />
- cfl football 99 weather effects and game play<br />
- cfl football 99 multiple player body styles<br />
- cfl football 99 post-play replay and camera control<br />
- cfl football 99 online multiplayer mode<br />
- cfl football 99 tournament action at retro's e-sports bar<br />
- cfl football 99 feedback and updates from developers<br />
- cfl football 99 news and media coverage page<br />
- cfl football 99 twitter and facebook page<br />
- canuck play other games in pre-development<br />
- canuck play spies code breaking secret missions game<br />
- canuck play canadian comic book super heroes game<br />
- canuck play e for everyone rating games<br />
- canuck play legacy titles maximum football game<br />
- canuck play contact information and homepage link<br />
- canuck play development blog and newsletter sign up</p>
- <h2>Conclusion</h2>
- <h3>Summary of the main points</h3>
- <p>CFL Football '99 is a gridiron football video game that simulates the rules, teams, players, and stadiums of the Canadian Football League. It is an officially licensed product of the CFL and the CFLPA. It is a simulation game that tries to recreate the realistic aspects of Canadian football. It is a niche game that caters to a specific audience of Canadian football fans. It is a rare and obscure title that has a cult following among some Canadian football enthusiasts.</p>
- <h3>Call to action for the readers</h3>
- <p>If you are interested in playing CFL Football '99, you can download it from various websites that host old games. You might need an emulator or a compatibility mode to run it on modern computers. You can also check out some videos or reviews of the game online to see how it looks and plays. You can also join some forums or communities of Canadian football fans who still play or discuss the game. You can also share your thoughts or experiences with CFL Football '99 in the comments section below.</p>
- <h4>FAQs</h4>
- <ul>
- <li><b>Q: Is CFL Football '99 compatible with Windows 10?</b></li>
- <li>A: No, CFL Football '99 is not compatible with Windows 10 or any other recent version of Windows. You might need an emulator or a compatibility mode to run it on modern computers.</li>
- <li><b>Q: Where can I buy CFL Football '99?</b></li>
- <li>A: You can't buy CFL Football '99 from any official source, as the game is out of print and no longer supported by the developer or the publisher. You might find some copies on online auction sites or second-hand stores, but they are very rare and expensive.</li>
- <li><b>Q: Is there a newer version of CFL Football '99?</b></li>
- <li>A: No, there is no newer version of CFL Football '99 that is officially licensed by the CFL or the CFLPA. The only sequel to CFL Football '99 is CFL 200 0, which was released by David Winter in 2000, but it is an unofficial and unauthorized project that uses the same engine as CFL Football '99.</li>
- <li><b>Q: How can I play CFL Football '99 online with other players?</b></li>
- <li>A: CFL Football '99 does not have a built-in online multiplayer mode, but you might be able to play it online with other players using third-party software or services that allow you to connect and share your screen with other users. However, this might not work well or at all, depending on your internet connection and compatibility issues.</li>
- <li><b>Q: Are there any mods or patches for CFL Football '99?</b></li>
- <li>A: Yes, there are some mods and patches for CFL Football '99 that add new features, fix bugs, or update the rosters and graphics. You can find some of them on websites that host old games or fan-made content. However, these mods and patches are unofficial and unauthorized by the CFL or the CFLPA, and they might not work properly or cause problems with your game.</li>
- </ul></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Delhi Blue App How to Use the First Ever Common Mobility App in Delhi.md DELETED
@@ -1,101 +0,0 @@
- <br />
- <h1>How to Download Delhi Blue App and Why You Should Do It</h1>
- <p>If you are looking for a safe, reliable, and sustainable taxi service in Delhi NCR or Bengaluru, you should download Delhi Blue App on your smartphone. Delhi Blue App is India's first all-electric cab service that offers you a comfortable, convenient, and eco-friendly travel experience. In this article, we will tell you what Delhi Blue App is, how to download it on your Android or iOS device, and how to use it for your travel needs.</p>
- <h2>What is Delhi Blue App?</h2>
- <h3>A brief introduction to the app and its features</h3>
- <p>Delhi Blue App is a mobile app that allows you to book cabs that run on electricity instead of fossil fuels. The app is developed by BluSmart, a company that aims to revolutionize the way people travel in cabs in urban India. The app has several features that make it user-friendly and convenient, such as:</p>
- <h2>download delhi blue app</h2><br /><p><b><b>Download Zip</b> &#10002; <a href="https://jinyurl.com/2uNR3c">https://jinyurl.com/2uNR3c</a></b></p><br /><br />
- <ul>
- <li>Easy booking: You can book a cab in just a few taps on your phone. You can also schedule your ride in advance or request a ride later.</li>
- <li>Transparent pricing: You can see the fare estimate before you confirm your booking. There are no surge prices, hidden charges, or cancellation fees.</li>
- <li>Safe and secure: You can trust the drivers who are verified and trained by BluSmart. You can also share your ride details with your family and friends for extra safety.</li>
- <li>Customer support: You can contact the customer care team anytime through the app or call them at +91-8880500500.</li>
- </ul>
- <h3>The benefits of using the app for cab booking, airport transfers, and eco-friendly travel</h3>
- <p>By using Delhi Blue App, you can enjoy several benefits that make your travel experience better, such as:</p>
- <ul>
- <li>Comfort: You can travel in spacious, air-conditioned cabs that have free Wi-Fi, charging ports, and music system.</li>
- <li>Convenience: You can book a cab anytime and anywhere in Delhi NCR or Bengaluru. You can also use the app for airport transfers to & from the IGI Airport, Delhi & Kempegowda International Airport Bengaluru.</li>
- <li>Eco-friendliness: You can reduce your carbon footprint by traveling in cabs that run on clean energy. You can also contribute to BluSmart's mission of planting one tree for every ride you take.</li>
- </ul>
- <h2>How to Download Delhi Blue App on Your Android or iOS Device</h2>
- <h3>The steps to download the app from Google Play or App Store</h3>
- <p>To download Delhi Blue App on your smartphone, you need to follow these simple steps:</p>
- <p>How to download delhi blue app for free<br />
- Download delhi blue app and get discounts on online shopping<br />
- Delhi blue app review: why you should download it today<br />
- Download delhi blue app and earn cashback on every purchase<br />
- Benefits of downloading delhi blue app for your business<br />
- Download delhi blue app and join the largest community of online shoppers<br />
- Delhi blue app features: what you can do with it after downloading<br />
- Download delhi blue app and access exclusive deals and offers<br />
- Delhi blue app vs other shopping apps: which one should you download<br />
- Download delhi blue app and save money on travel, food, entertainment, and more<br />
- How to use delhi blue app after downloading it on your phone<br />
- Download delhi blue app and enjoy hassle-free online shopping experience<br />
- Delhi blue app customer support: how to contact them after downloading the app<br />
- Download delhi blue app and get rewarded for your loyalty and referrals<br />
- Delhi blue app testimonials: what users are saying about it after downloading<br />
- Download delhi blue app and find the best products and services for your needs<br />
- Delhi blue app FAQs: everything you need to know before downloading the app<br />
- Download delhi blue app and compare prices, reviews, ratings, and more<br />
- Delhi blue app updates: what's new in the latest version of the app<br />
- Download delhi blue app and get personalized recommendations based on your preferences<br />
- How to uninstall delhi blue app if you don't like it after downloading<br />
- Download delhi blue app and participate in surveys, contests, quizzes, and more<br />
- Delhi blue app privacy policy: how they protect your data after downloading the app<br />
- Download delhi blue app and share your feedback and suggestions with the developers<br />
- Delhi blue app alternatives: what other apps can you download instead of delhi blue app</p>
- <ol>
- <li>Open Google Play or App Store on your device.</li>
- <li>Search for "BluSmart" or "Delhi Blue App" in the search bar.</li>
- <li>Tap on the app icon and then tap on "Install" (for Android) or "Get" (for iOS).</li>
- <li>Wait for the app to download and install on your device.</li>
- </ol>
- <h3>How to sign up and create an account on the app</h3>
- <p>To use Delhi Blue App, you need to sign up and create an account on the app. Here's how:</p>
- <ol>
- <li>Open the app on your device and tap on "Sign Up".</li>
- <li>Enter your name, email address, phone number, and password.</li <li>Verify your phone number by entering the OTP sent to your number.</li>
- <li>Agree to the terms and conditions and tap on "Create Account".</li>
- <li>You can also sign up using your Google or Facebook account.</li>
- </ol>
- <p>Congratulations, you have successfully created your account on Delhi Blue App. You can now start booking cabs and enjoy the benefits of the app.</p>
- <h2>How to Use Delhi Blue App for Your Travel Needs</h2>
65
- <h3>How to book a cab, choose a payment method, and track your ride</h3>
66
- <p>Booking a cab on Delhi Blue App is very easy and quick. Just follow these steps:</p>
67
- <ol>
68
- <li>Open the app on your device and enter your pickup and drop locations.</li>
69
- <li>Select the type of cab you want from the available options.</li>
70
- <li>Tap on "Book Now" or "Ride Later" depending on your preference.</li>
71
- <li>Choose your payment method from the options of cash, card, wallet, or UPI.</li>
72
- <li>Confirm your booking and wait for the driver to arrive at your location.</li>
73
- <li>You can track your ride on the app and see the driver's details, cab number, and estimated time of arrival.</li>
74
- <li>Enjoy your ride and rate your experience on the app after completing your trip.</li>
75
- </ol>
76
- <h3>How to get discounts, rewards, and referrals on the app</h3>
77
- <p>Delhi Blue App also offers you various discounts, rewards, and referrals that make your travel more affordable and rewarding. Here are some ways to avail them:</p>
78
- <ul>
79
- <li>You can use promo codes and coupons to get discounts on your rides. You can find them on the app or on the website of BluSmart.</li>
80
- <li>You can earn rewards points for every ride you take on the app. You can redeem them for free rides or vouchers from partner brands.</li>
81
- <li>You can refer your friends and family to the app and get Rs. 100 off on your next ride for every successful referral. Your referrals will also get Rs. 100 off on their first ride.</li>
82
- </ul>
83
- <h2>Conclusion</h2>
84
- <h3>A summary of the main points and a call to action</h3>
85
- <p>Delhi Blue App is a great way to travel in cabs that are safe, reliable, and eco-friendly. You can download the app on your Android or iOS device and book cabs anytime and anywhere in Delhi NCR or Bengaluru. You can also enjoy various features and benefits of the app, such as transparent pricing, customer support, comfort, convenience, and eco-friendliness. You can also get discounts, rewards, and referrals on the app that make your travel more affordable and rewarding. So what are you waiting for? Download Delhi Blue App today and join the green revolution in urban mobility.</p>
86
- <h2>FAQs</h2>
87
- <h3>Five common questions and answers about the app</h3>
88
- <ol>
89
- <li><b>Q: How is Delhi Blue App different from other cab services?</b></li>
90
- <li>A: Delhi Blue App is different from other cab services because it offers you cabs that run on electricity instead of fossil fuels. This makes them more eco-friendly, cost-effective, and noise-free. Delhi Blue App also has no surge pricing, hidden charges, or cancellation fees.</li>
91
- <li><b>Q: How can I contact Delhi Blue App customer care?</b></li>
92
- <li>A: You can contact Delhi Blue App customer care through the app or call them at +91-8880500500. You can also email them at [email protected] or visit their website at www.blusmart.in.</li>
93
- <li><b>Q: How can I cancel my booking on Delhi Blue App?</b></li>
94
- <li>A: You can cancel your booking on Delhi Blue App anytime before the driver arrives at your location. You will not be charged any cancellation fee. To cancel your booking, tap on "Cancel" on the app and select a reason for cancellation.</li>
95
- <li><b>Q: How can I pay for my ride on Delhi Blue App?</b></li>
96
- <li>A: You can pay for your ride on Delhi Blue App using cash, card, wallet, or UPI. You can choose your preferred payment method before confirming your booking. You can also change your payment method after completing your trip.</li>
97
- <li><b>Q: How can I give feedback or suggestions to Delhi Blue App?</b></li>
98
- <li>A: You can give feedback or suggestions to Delhi Blue App by rating your ride experience on the app after completing your trip. You can also write a review or share your comments on the app or on social media platforms like Facebook, Twitter, Instagram, or LinkedIn.</li>
99
- </ol>
 
spaces/1phancelerku/anime-remove-background/Download Mortal Kombat Tamil Dubbed Movie in HD Quality from Isaimini.md DELETED
@@ -1,147 +0,0 @@
1
- <br />
2
- <h1>Mortal Kombat Tamil Dubbed Movie Download Isaimini: A Review</h1>
3
- <p>Mortal Kombat is one of the most anticipated movies of 2021, based on the popular video game series of the same name. It is a reboot of the previous film adaptations, featuring a new cast and a new storyline. The movie has been released in multiple languages, including Tamil, to cater to the diverse fan base. But how good is the movie, and how can you watch it in Tamil? In this article, we will review the Mortal Kombat Tamil dubbed movie download Isaimini option, and also give you some insights into the plot, the characters, and the quality of the movie.</p>
4
- <h2>Introduction</h2>
6
- <h3>What is Mortal Kombat?</h3>
7
- <p>Mortal Kombat is a media franchise that originated from a fighting video game developed by Midway Games in 1992. The game features a variety of characters, each with their own special abilities and moves, who compete in a tournament called Mortal Kombat. The tournament is a way to determine the fate of different realms, such as Earthrealm, Outworld, and Netherrealm, which are constantly at war with each other. The game is known for its violent and graphic content, such as fatalities, brutalities, and x-rays.</p>
8
- <h3>Why is it popular in Tamil Nadu?</h3>
9
- <p>Mortal Kombat has a huge fan following in Tamil Nadu, especially among the young generation. There are several reasons for this popularity. First of all, the game has a lot of cultural references and influences from various mythologies and religions, such as Hinduism, Buddhism, Taoism, and Norse mythology. Some of the characters are inspired by gods, demons, and heroes from these traditions, such as Raiden, Shiva, Goro, and Scorpion. Secondly, the game has a lot of action and thrill, which appeals to the Tamil audience who love masala movies. Thirdly, the game has a lot of humor and sarcasm, which matches the Tamil sense of humor. Fourthly, the game has a lot of customization options, which allows the players to create their own characters and costumes.</p>
10
- <h3>How to download Mortal Kombat Tamil dubbed movie from Isaimini?</h3>
11
- <p>Isaimini is one of the most popular websites for downloading Tamil movies and songs. It offers a wide range of genres and categories, such as action, comedy, romance, horror, thriller, drama, and animation. It also provides dubbed versions of Hollywood and Bollywood movies, such as Mortal Kombat. To download Mortal Kombat Tamil dubbed movie from Isaimini, you need to follow these steps:</p>
12
- <ol>
13
- <li>Go to the official website of Isaimini using a VPN or proxy service.</li>
14
- <li>Search for Mortal Kombat in the search bar or browse through the categories.</li>
15
- <li>Select the movie from the list of results and click on it.</li>
16
- <li>Choose the quality and format of the movie that you want to download.</li>
17
- <li>Click on the download link and wait for the movie to be downloaded.</li>
18
- </ol>
19
- <p>Note: Downloading movies from Isaimini is illegal and may expose you to cyber risks. We do not endorse or promote piracy in any way. We recommend that you watch movies from legal sources only.</p>
20
- <h2>Plot summary</h2>
21
- <h3>The main characters</h3>
22
- <p>The movie follows the lives of several characters who are chosen to participate in the Mortal Kombat tournament. They are:</p>
23
- <ul>
64
- <li>Cole Young (Lewis Tan): A former MMA fighter who has a mysterious dragon mark on his chest. He is unaware of his lineage and his connection to the legendary warrior Hanzo Hasashi, also known as Scorpion (Hiroyuki Sanada).</li>
65
- <li>Sonya Blade (Jessica McNamee): A former Special Forces officer who has been tracking down the dragon mark and the Mortal Kombat tournament. She is partnered with Jax (Mehcad Brooks), who also has the mark.</li>
66
- <li>Kano (Josh Lawson): A mercenary and a leader of the Black Dragon crime syndicate. He has a cybernetic eye that shoots laser beams. He is captured by Sonya and forced to join her team.</li>
67
- <li>Liu Kang (Ludi Lin): A Shaolin monk and a descendant of the great Kung Lao. He has mastered the art of fire manipulation and can summon a dragon of flames.</li>
68
- <li>Kung Lao (Max Huang): A cousin of Liu Kang and a fellow Shaolin monk. He wields a razor-sharp hat that can cut through anything.</li>
69
- <li>Raiden (Tadanobu Asano): The god of thunder and the protector of Earthrealm. He can teleport, manipulate lightning, and create force fields. He guides and trains the chosen fighters for the tournament.</li>
70
- <li>Shang Tsung (Chin Han): The sorcerer and the ruler of Outworld. He can steal souls, shapeshift, and use dark magic. He is the main antagonist of the movie, who wants to conquer Earthrealm by cheating in the tournament.</li>
71
- <li>Sub-Zero (Joe Taslim): A cryomancer and an assassin who works for Shang Tsung. He can create and manipulate ice, and is the archenemy of Scorpion. He is responsible for killing Scorpion's family and clan in the past.</li>
72
- <li>Mileena (Sisi Stringer): A mutant hybrid of Tarkatan and Edenian races. She has sharp teeth, claws, and a taste for blood. She is loyal to Shang Tsung and serves as his enforcer.</li>
73
- <li>Goro (voiced by Angus Sampson): A four-armed Shokan prince and a champion of Mortal Kombat. He is a formidable opponent who can crush anyone with his brute strength.</li>
74
- <li>Reptile (voiced by Samuel Hargrave): A reptilian creature who can spit acid, turn invisible, and crawl on walls. He is one of Shang Tsung's minions who attacks the Earthrealm fighters.</li>
75
- <li>Nitara (Mel Jarnson): A winged vampire who feeds on blood. She is another one of Shang Tsung's henchmen who faces off against Kung Lao.</li>
76
- <li>Kabal (voiced by Damon Herriman): A former Black Dragon member who has a grudge against Kano. He wears a respirator mask and uses hooked swords. He can move at super speed and create sonic booms.</li>
77
- </ul>
78
- <h3>The story arc</h3>
79
- <p>The movie begins with a flashback to 17th century Japan, where Hanzo Hasashi, a ninja leader of the Shirai Ryu clan, is attacked by Bi-Han, a rival assassin of the Lin Kuei clan. Bi-Han kills Hanzo's wife and son with his ice powers, and then kills Hanzo himself. However, Hanzo's blood is collected by Raiden, who transports his body to the Netherrealm, where he becomes Scorpion, a vengeful specter.</p>
80
- <p>In the present day, Cole Young is a struggling MMA fighter who has a dragon mark on his chest. He is targeted by Bi-Han, who now goes by Sub-Zero, and is rescued by Jax, who also has the mark. Jax tells Cole to find Sonya Blade, who knows more about the mark and the Mortal Kombat tournament. Cole meets Sonya at her hideout, where he also encounters Kano, who has been captured by Sonya. Sonya explains that the mark is a sign of being chosen to fight in Mortal Kombat, a tournament that decides the fate of different realms. She also reveals that Earthrealm has lost nine out of ten tournaments to Outworld, and if they lose one more, Outworld will invade and enslave Earthrealm.</p>
81
- <p>Sonya, Cole, and Kano are attacked by Reptile, who is sent by Shang Tsung to kill them. They manage to defeat Reptile with Kano's help, who rips out his heart. Kano agrees to join Sonya and Cole in exchange for money, and they fly to Raiden's temple in China. There they meet Liu Kang and Kung Lao, who are also chosen fighters for Earthrealm. They also meet Raiden, who is not impressed by their lack of skills and abilities. Raiden explains that each fighter has a special power called Arcana, that they need to unlock in order to fight in the tournament. He also warns them that Shang Tsung and his warriors are trying to kill them before the tournament begins, in order to secure their victory.</p>
82
- <p>The Earthrealm fighters begin their training under Liu Kang and Kung Lao, who teach them how to use their Arcana. Kano discovers his Arcana first, which is a laser eye. Cole, however, struggles to find his Arcana, and is constantly defeated by Kung Lao. Raiden tells Cole that he is a descendant of Hanzo Hasashi, and that he has a special destiny. He also shows him Hanzo's kunai, which is a dagger with a rope attached to it.</p>
83
- <p>Meanwhile, Shang Tsung sends Sub-Zero, Mileena, Nitara, Kabal, and Goro to attack the temple. Raiden creates a force field to protect the temple, but Kano betrays the Earthrealm fighters and disables the field, allowing the invaders to enter. A series of battles ensue, in which Kung Lao kills Nitara with his hat, Liu Kang kills Kabal with his fire dragon, and Sonya kills Kano with a garden gnome. Cole fights Goro and unlocks his Arcana, which is a suit of armor that absorbs damage and enhances his strength. He kills Goro with Hanzo's kunai.</p>
84
- <h3>The climax and the ending</h3>
85
- <p>Shang Tsung arrives and kills Kung Lao by stealing his soul. He declares that he will kill all the Earthrealm fighters and take over their realm. Raiden intervenes and teleports the Earthrealm fighters to different locations, where they can face their enemies one-on-one. He also gives Cole Hanzo's kunai and tells him to find Scorpion in the Netherrealm.</p>
86
- <p>Cole travels to the Netherrealm and uses Hanzo's kunai to summon Scorpion from his hellish prison. Scorpion recognizes Cole as his bloodline and agrees to help him fight Sub-Zero, who has kidnapped Cole's family. They return to Earthrealm and confront Sub-Zero in an abandoned gym. A fierce fight ensues, in which Scorpion and Cole manage to overpower Sub-Zero with their combined skills and powers. Scorpion finishes Sub-Zero with his signature move, "Get over here!", and burns him alive with his fire breath.</p>
87
- <p>Scorpion thanks Cole for freeing him from his curse and tells him to protect his family and his realm. He then disappears into flames. Cole reunites with his family and embraces them. Raiden appears and congratulates Cole for his victory. He also warns him that Shang Tsung will return with more warriors, and that they need to prepare for the next tournament. He tells Cole to find more champions for Earthrealm, and gives him a hint by showing him a poster of Johnny Cage, a famous Hollywood actor and martial artist.</p>
88
- <p>Cole decides to leave his MMA career and travel to Hollywood to recruit Johnny Cage. The movie ends with a shot of Johnny Cage's poster, which has his name and a slogan: "You won't believe what comes next".</p>
89
- <h2>Analysis and critique</h2>
90
- <h3>The strengths of the movie</h3>
91
- <p>The movie has several strengths that make it an enjoyable and entertaining watch for Mortal Kombat fans and newcomers alike. Some of these strengths are:</p>
92
- <ul>
93
- <li>The movie stays faithful to the source material, by incorporating many elements from the video games, such as the characters, the moves, the fatalities, the lore, and the Easter eggs.</li>
94
- <li>The movie has a lot of action and gore, which are essential for a Mortal Kombat movie. The fight scenes are well-choreographed, well-shot, and well-edited, showcasing the skills and abilities of each fighter.</li>
95
- <li>The movie has a lot of humor and fun, which balance out the seriousness and darkness of the story. The movie does not take itself too seriously, and pokes fun at some of the clichés and tropes of the genre.</li>
96
- <li>The movie has a good cast of actors who deliver solid performances. The actors fit their roles well, and bring out the personalities and emotions of their characters.</li>
97
- <li>The movie has a good production value, with impressive visual effects, sound design, music score, costumes, and sets. The movie creates a convincing world of Mortal Kombat, with its different realms, cultures, and creatures.</li>
98
- </ul>
99
- <h3>The weaknesses of the movie</h3>
100
- <p>The movie also has some weaknesses that prevent it from being a perfect adaptation of Mortal Kombat. Some of these weaknesses are:</p>
101
- <ul>
102
- <li>The movie has a weak plot that lacks depth and originality. The movie follows a generic formula of a hero's journey, with a lot of exposition and clichés. The movie does not explore the themes and motivations of the characters, nor does it develop the relationships and conflicts among them. The movie also does not explain the rules and logic of the Mortal Kombat tournament, and why it is so important for the realms.</li>
103
- <li>The movie has a rushed pacing that does not allow enough time for the characters and the story to breathe. The movie tries to cram too much information and action in a short span of time, resulting in a lack of coherence and continuity. The movie also skips over some important scenes and events, such as the actual tournament itself, and the aftermath of the battles.</li>
104
- <li>The movie has a poor dialogue that is cheesy and corny. The movie relies on a lot of exposition and narration to explain the plot and the characters, rather than showing them through actions and interactions. The movie also uses a lot of catchphrases and one-liners that are meant to be cool and witty, but end up being cringey and awkward.</li>
105
- <li>The movie has a mediocre direction that does not bring out the best of the actors and the script. The movie suffers from a lack of vision and style, and does not create a distinctive tone or mood for the movie. The movie also fails to balance the different elements of the movie, such as the drama, the comedy, the horror, and the fantasy.</li>
106
- <li>The movie has a low rating that limits its potential audience and impact. The movie is rated R in the US, which means that it is restricted to viewers who are 17 years or older, or accompanied by an adult. This rating may deter some fans who are younger or more sensitive to violence and gore. The movie may also face censorship or bans in some countries or regions, where such content is deemed inappropriate or offensive.</li>
107
- </ul>
108
- <h3>The comparison with the original version and other adaptations</h3>
109
- <p>The movie is a reboot of the previous film adaptations of Mortal Kombat, which were released in 1995 and 1997. The movie is also based on the video game series of Mortal Kombat, which has been running since 1992. The movie differs from the original version and other adaptations in several ways. Some of these differences are:</p>
110
- <ul>
111
- <li>The movie has a new cast of actors who play the roles of the characters. The movie also introduces some new characters who were not present in the original version or other adaptations, such as Cole Young, Nitara, Kabal, and Goro.</li>
112
- <li>The movie has a new storyline that deviates from the original version or other adaptations. The movie focuses on Cole Young as the main protagonist, who is a descendant of Scorpion. The movie also changes some details and events from the original version or other adaptations, such as the origin of Sub-Zero and Scorpion's rivalry, the role of Raiden and Shang Tsung in the tournament, and the outcome of some battles.</li>
113
- <li>The movie has a darker and grittier tone than the original version or other adaptations. The movie emphasizes more on the violence and gore of Mortal Kombat, by showing more blood, injuries, deaths, and fatalities. The movie also explores more of the dark and sinister aspects of Mortal Kombat, such as the corruption, betrayal, torture, and soul stealing.</li>
114
- <li>The movie has a better quality than the original version or other adaptations. The movie benefits from the advances in technology and filmmaking, by having better visual effects, sound design, music score, costumes, and sets. The movie also benefits from having a higher budget and production value than the original version or other adaptations, which were criticized for being low-budget and cheesy.</li>
115
- </ul>
116
- <h2>Conclusion</h2>
117
- <h3>The final verdict</h3>
118
- <p>Mortal Kombat is a movie that delivers what it promises: a lot of action, gore, and fun. The movie is a faithful adaptation of the video game series, and a satisfying reboot of the film franchise. The movie has a good cast, a good production value, and a good sense of humor. The movie is not perfect, however, and has some flaws in its plot, pacing, dialogue, and direction. The movie is also not for everyone, as it is rated R and may be too violent or offensive for some viewers. The movie is best enjoyed by Mortal Kombat fans and action lovers, who can appreciate the movie for what it is: a guilty pleasure.</p>
119
- <h3>The alternatives to Isaimini</h3>
120
- <p>As mentioned earlier, downloading movies from Isaimini is illegal and risky. Therefore, we suggest that you watch Mortal Kombat from legal sources only. Some of the alternatives to Isaimini are:</p>
121
- <ul>
122
- <li>HBO Max: This is the official streaming platform for Mortal Kombat in the US. You can watch the movie online or offline with a subscription fee of $14.99 per month.</li>
123
- <li>Amazon Prime Video: This is one of the most popular streaming platforms in India. You can rent or buy Mortal Kombat in Tamil or other languages with a fee ranging from ₹75 to ₹150.</li>
124
- <li>Netflix: This is another popular streaming platform in India. You can watch Mortal Kombat in Tamil or other languages with a subscription fee starting from ₹199 per month.</li>
125
- <li>YouTube: This is a free platform where you can watch Mortal Kombat in Tamil or other languages with ads. However, you need to be careful about the quality and legality of the videos.</li>
126
- <li>Theaters: This is the best way to watch Mortal Kombat in Tamil or other languages on the big screen. However, you need to check the availability and safety of the theaters in your area.</li>
127
- </ul>
128
- <h3>The future of Mortal Kombat franchise</h3>
129
- <p>Mortal Kombat is a movie that sets up the stage for more sequels and spin-offs. The movie ends with a cliffhanger that hints at the introduction of Johnny Cage, one of the most iconic characters of Mortal Kombat. The movie also leaves some room for more characters and stories from the video game series, such as Kitana, Sindel, Shao Kahn, Quan Chi, and more. The movie has received mixed reviews from critics and audiences, but has performed well at the box office and streaming platforms. The movie has also generated a lot of buzz and hype among Mortal Kombat fans and newcomers alike. Therefore, it is likely that we will see more Mortal Kombat movies in the future, as long as there is enough demand and support from the fans.</p>
130
- <h2>FAQs</h2>
131
- <p>Here are some frequently asked questions about Mortal Kombat Tamil dubbed movie download Isaimini:</p>
132
- <ol>
133
- <li>Q: Is Mortal Kombat Tamil dubbed movie available on Isaimini?</li>
134
- <li>A: Yes, Mortal Kombat Tamil dubbed movie is available on Isaimini, but it is illegal and risky to download it from there.</li>
135
- <li>Q: How can I watch Mortal Kombat Tamil dubbed movie legally?</li>
136
- <li>A: You can watch Mortal Kombat Tamil dubbed movie legally from platforms such as HBO Max, Amazon Prime Video, Netflix, YouTube, or theaters.</li>
137
- <li>Q: Who are the actors who play the roles of Mortal Kombat characters?</li>
138
- <li>A: The actors who play the roles of Mortal Kombat characters are Lewis Tan as Cole Young/Scorpion's descendant, Hiroyuki Sanada as Hanzo Hasashi/Scorpion, Joe Taslim as Bi-Han/Sub-Zero, Jessica McNamee as Sonya Blade, Mehcad Brooks as Jax, Josh Lawson as Kano, Ludi Lin as Liu Kang, Max Huang as Kung Lao, Tadanobu Asano as Raiden, Chin Han as Shang Tsung, Sisi Stringer as Mileena, Angus Sampson as Goro, Samuel Hargrave as Reptile, Mel Jarnson as Nitara, and Damon Herriman as Kabal.</li>
139
- <li>Q: What are the ratings and reviews of Mortal Kombat movie?</li>
140
- <li>A: Mortal Kombat movie has a rating of 6.2 out of 10 on IMDb, 55% on Rotten Tomatoes, and 44% on Metacritic. The movie has received mixed reviews from critics and audiences, with some praising its action, humor, and fidelity to the source material, and others criticizing its plot, pacing, dialogue, and direction.</li>
141
- <li>Q: When will Mortal Kombat 2 movie be released?</li>
142
- <li>A: There is no official confirmation or announcement about Mortal Kombat 2 movie yet, but the director Simon McQuoid has expressed his interest and willingness to make a sequel, depending on the response and demand from the fans. The movie also sets up the stage for a sequel, by introducing Johnny Cage and teasing more characters and stories from the video game series.</li>
143
- <li>Q: How many Mortal Kombat movies are there?</li>
144
- <li>A: There are three Mortal Kombat movies so far. The first one is Mortal Kombat (1995), directed by Paul W.S. Anderson and starring Christopher Lambert, Robin Shou, Linden Ashby, Bridgette Wilson, and Cary-Hiroyuki Tagawa. The second one is Mortal Kombat: Annihilation (1997), directed by John R. Leonetti and starring Robin Shou, Talisa Soto, Brian Thompson, Sandra Hess, and James Remar. The third one is Mortal Kombat (2021), directed by Simon McQuoid and starring Lewis Tan, Hiroyuki Sanada, Joe Taslim, Jessica McNamee, Mehcad Brooks, Josh Lawson, Ludi Lin, Max Huang, Tadanobu Asano, Chin Han, Sisi Stringer, Angus Sampson, Samuel Hargrave, Mel Jarnson, and Damon Herriman.</li>
145
- </ol>
 
spaces/2023Liu2023/bingo/src/components/ui/button.tsx DELETED
@@ -1,57 +0,0 @@
- import * as React from 'react'
- import { Slot } from '@radix-ui/react-slot'
- import { cva, type VariantProps } from 'class-variance-authority'
- 
- import { cn } from '@/lib/utils'
- 
- const buttonVariants = cva(
-   'inline-flex items-center justify-center rounded-md text-sm font-medium shadow ring-offset-background transition-colors outline-none disabled:pointer-events-none disabled:opacity-50',
-   {
-     variants: {
-       variant: {
-         default:
-           'bg-primary text-primary-foreground shadow-md hover:bg-primary/90',
-         destructive:
-           'bg-destructive text-destructive-foreground hover:bg-destructive/90',
-         outline:
-           'border border-input hover:bg-accent hover:text-accent-foreground',
-         secondary:
-           'bg-secondary text-secondary-foreground hover:bg-secondary/80',
-         ghost: 'shadow-none hover:bg-accent hover:text-accent-foreground',
-         link: 'text-primary underline-offset-4 shadow-none hover:underline'
-       },
-       size: {
-         default: 'h-8 px-4 py-2',
-         sm: 'h-8 rounded-md px-3',
-         lg: 'h-11 rounded-md px-8',
-         icon: 'h-8 w-8 p-0'
-       }
-     },
-     defaultVariants: {
-       variant: 'default',
-       size: 'default'
-     }
-   }
- )
- 
- export interface ButtonProps
-   extends React.ButtonHTMLAttributes<HTMLButtonElement>,
-     VariantProps<typeof buttonVariants> {
-   asChild?: boolean
- }
- 
- const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
-   ({ className, variant, size, asChild = false, ...props }, ref) => {
-     const Comp = asChild ? Slot : 'button'
-     return (
-       <Comp
-         className={cn(buttonVariants({ variant, size, className }))}
-         ref={ref}
-         {...props}
-       />
-     )
-   }
- )
- Button.displayName = 'Button'
- 
- export { Button, buttonVariants }
 
spaces/232labs/VToonify/vtoonify/model/raft/core/datasets.py DELETED
@@ -1,235 +0,0 @@
- # Data loading based on https://github.com/NVIDIA/flownet2-pytorch
- 
- import numpy as np
- import torch
- import torch.utils.data as data
- import torch.nn.functional as F
- 
- import os
- import math
- import random
- from glob import glob
- import os.path as osp
- 
- from model.raft.core.utils import frame_utils
- from model.raft.core.utils.augmentor import FlowAugmentor, SparseFlowAugmentor
- 
- 
- class FlowDataset(data.Dataset):
-     def __init__(self, aug_params=None, sparse=False):
-         self.augmentor = None
-         self.sparse = sparse
-         if aug_params is not None:
-             if sparse:
-                 self.augmentor = SparseFlowAugmentor(**aug_params)
-             else:
-                 self.augmentor = FlowAugmentor(**aug_params)
- 
-         self.is_test = False
-         self.init_seed = False
-         self.flow_list = []
-         self.image_list = []
-         self.extra_info = []
- 
-     def __getitem__(self, index):
- 
-         if self.is_test:
-             img1 = frame_utils.read_gen(self.image_list[index][0])
-             img2 = frame_utils.read_gen(self.image_list[index][1])
-             img1 = np.array(img1).astype(np.uint8)[..., :3]
-             img2 = np.array(img2).astype(np.uint8)[..., :3]
-             img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
-             img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
-             return img1, img2, self.extra_info[index]
- 
-         if not self.init_seed:
-             worker_info = torch.utils.data.get_worker_info()
-             if worker_info is not None:
-                 torch.manual_seed(worker_info.id)
-                 np.random.seed(worker_info.id)
-                 random.seed(worker_info.id)
-                 self.init_seed = True
- 
-         index = index % len(self.image_list)
-         valid = None
-         if self.sparse:
-             flow, valid = frame_utils.readFlowKITTI(self.flow_list[index])
-         else:
-             flow = frame_utils.read_gen(self.flow_list[index])
- 
-         img1 = frame_utils.read_gen(self.image_list[index][0])
-         img2 = frame_utils.read_gen(self.image_list[index][1])
- 
-         flow = np.array(flow).astype(np.float32)
-         img1 = np.array(img1).astype(np.uint8)
-         img2 = np.array(img2).astype(np.uint8)
- 
-         # grayscale images
-         if len(img1.shape) == 2:
-             img1 = np.tile(img1[..., None], (1, 1, 3))
-             img2 = np.tile(img2[..., None], (1, 1, 3))
-         else:
-             img1 = img1[..., :3]
-             img2 = img2[..., :3]
- 
-         if self.augmentor is not None:
-             if self.sparse:
-                 img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid)
-             else:
-                 img1, img2, flow = self.augmentor(img1, img2, flow)
- 
-         img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
-         img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
-         flow = torch.from_numpy(flow).permute(2, 0, 1).float()
- 
-         if valid is not None:
-             valid = torch.from_numpy(valid)
-         else:
-             valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000)
- 
-         return img1, img2, flow, valid.float()
- 
-     def __rmul__(self, v):
-         self.flow_list = v * self.flow_list
-         self.image_list = v * self.image_list
-         return self
- 
-     def __len__(self):
-         return len(self.image_list)
- 
- 
- class MpiSintel(FlowDataset):
-     def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'):
-         super(MpiSintel, self).__init__(aug_params)
-         flow_root = osp.join(root, split, 'flow')
-         image_root = osp.join(root, split, dstype)
- 
-         if split == 'test':
-             self.is_test = True
- 
-         for scene in os.listdir(image_root):
-             image_list = sorted(glob(osp.join(image_root, scene, '*.png')))
-             for i in range(len(image_list)-1):
-                 self.image_list += [ [image_list[i], image_list[i+1]] ]
-                 self.extra_info += [ (scene, i) ]  # scene and frame_id
- 
-             if split != 'test':
-                 self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo')))
- 
- 
- class FlyingChairs(FlowDataset):
-     def __init__(self, aug_params=None, split='train', root='datasets/FlyingChairs_release/data'):
-         super(FlyingChairs, self).__init__(aug_params)
- 
-         images = sorted(glob(osp.join(root, '*.ppm')))
-         flows = sorted(glob(osp.join(root, '*.flo')))
-         assert (len(images)//2 == len(flows))
- 
-         split_list = np.loadtxt('chairs_split.txt', dtype=np.int32)
-         for i in range(len(flows)):
-             xid = split_list[i]
-             if (split=='training' and xid==1) or (split=='validation' and xid==2):
-                 self.flow_list += [ flows[i] ]
-                 self.image_list += [ [images[2*i], images[2*i+1]] ]
- 
- 
- class FlyingThings3D(FlowDataset):
-     def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'):
-         super(FlyingThings3D, self).__init__(aug_params)
- 
-         for cam in ['left']:
-             for direction in ['into_future', 'into_past']:
-                 image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*')))
-                 image_dirs = sorted([osp.join(f, cam) for f in image_dirs])
- 
-                 flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*')))
-                 flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs])
- 
-                 for idir, fdir in zip(image_dirs, flow_dirs):
-                     images = sorted(glob(osp.join(idir, '*.png')))
-                     flows = sorted(glob(osp.join(fdir, '*.pfm')))
-                     for i in range(len(flows)-1):
-                         if direction == 'into_future':
-                             self.image_list += [ [images[i], images[i+1]] ]
-                             self.flow_list += [ flows[i] ]
-                         elif direction == 'into_past':
-                             self.image_list += [ [images[i+1], images[i]] ]
-                             self.flow_list += [ flows[i+1] ]
- 
- 
- class KITTI(FlowDataset):
-     def __init__(self, aug_params=None, split='training', root='datasets/KITTI'):
-         super(KITTI, self).__init__(aug_params, sparse=True)
-         if split == 'testing':
-             self.is_test = True
- 
-         root = osp.join(root, split)
-         images1 = sorted(glob(osp.join(root, 'image_2/*_10.png')))
-         images2 = sorted(glob(osp.join(root, 'image_2/*_11.png')))
- 
-         for img1, img2 in zip(images1, images2):
-             frame_id = img1.split('/')[-1]
-             self.extra_info += [ [frame_id] ]
-             self.image_list += [ [img1, img2] ]
- 
-         if split == 'training':
-             self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png')))
- 
- 
- class HD1K(FlowDataset):
-     def __init__(self, aug_params=None, root='datasets/HD1k'):
-         super(HD1K, self).__init__(aug_params, sparse=True)
- 
-         seq_ix = 0
-         while 1:
-             flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix)))
-             images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix)))
- 
-             if len(flows) == 0:
-                 break
- 
-             for i in range(len(flows)-1):
-                 self.flow_list += [flows[i]]
-                 self.image_list += [ [images[i], images[i+1]] ]
- 
-             seq_ix += 1
- 
- 
- def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'):
-     """ Create the data loader for the corresponding training set """
- 
-     if args.stage == 'chairs':
-         aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True}
-         train_dataset = FlyingChairs(aug_params, split='training')
- 
-     elif args.stage == 'things':
-         aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True}
-         clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass')
-         final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass')
-         train_dataset = clean_dataset + final_dataset
- 
-     elif args.stage == 'sintel':
-         aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True}
-         things = FlyingThings3D(aug_params, dstype='frames_cleanpass')
-         sintel_clean = MpiSintel(aug_params, split='training', dstype='clean')
-         sintel_final = MpiSintel(aug_params, split='training', dstype='final')
- 
-         if TRAIN_DS == 'C+T+K+S+H':
-             kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True})
-             hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True})
-             train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things
- 
-         elif TRAIN_DS == 'C+T+K/S':
-             train_dataset = 100*sintel_clean + 100*sintel_final + things
- 
-     elif args.stage == 'kitti':
-         aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False}
-         train_dataset = KITTI(aug_params, split='training')
- 
-     train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size,
-                                    pin_memory=False, shuffle=True, num_workers=4, drop_last=True)
- 
-     print('Training with %d image pairs' % len(train_dataset))
-     return train_loader
- 
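A minimal usage sketch for fetch_dataloader, assuming the FlyingChairs data has been downloaded to the default datasets/FlyingChairs_release/data path and that chairs_split.txt is present in the working directory; the args object here is a hypothetical stand-in for the argparse namespace the training script would normally supply:

    from types import SimpleNamespace

    # Stand-in for the training script's argparse namespace (illustrative values).
    args = SimpleNamespace(stage='chairs', image_size=[368, 496], batch_size=6)
    train_loader = fetch_dataloader(args)
    for img1, img2, flow, valid in train_loader:
        print(img1.shape, flow.shape)  # e.g. [6, 3, 368, 496] and [6, 2, 368, 496]
        break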
 
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py DELETED
@@ -1,25 +0,0 @@
- from .iresnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200
- from .mobilefacenet import get_mbf
- 
- 
- def get_model(name, **kwargs):
-     # resnet
-     if name == "r18":
-         return iresnet18(False, **kwargs)
-     elif name == "r34":
-         return iresnet34(False, **kwargs)
-     elif name == "r50":
-         return iresnet50(False, **kwargs)
-     elif name == "r100":
-         return iresnet100(False, **kwargs)
-     elif name == "r200":
-         return iresnet200(False, **kwargs)
-     elif name == "r2060":
-         from .iresnet2060 import iresnet2060
-         return iresnet2060(False, **kwargs)
-     elif name == "mbf":
-         fp16 = kwargs.get("fp16", False)
-         num_features = kwargs.get("num_features", 512)
-         return get_mbf(fp16=fp16, num_features=num_features)
-     else:
-         raise ValueError(f"unknown backbone name: {name}")
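A small sketch of how get_model is typically called, assuming the iresnet backbones accept the fp16 and num_features keyword arguments that the mbf branch reads, and that ArcFace-style 112x112 face crops are used:

    import torch

    net = get_model("r50", fp16=False, num_features=512)
    net.eval()
    with torch.no_grad():
        emb = net(torch.randn(1, 3, 112, 112))  # one 112x112 face crop
    print(emb.shape)  # expected: torch.Size([1, 512])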
 
spaces/52Hz/CMFNet_deblurring/model/block.py DELETED
@@ -1,146 +0,0 @@
- import torch
- import torch.nn as nn
- ##########################################################################
- def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
-     layer = nn.Conv2d(in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias, stride=stride)
-     return layer
- 
- 
- def conv3x3(in_chn, out_chn, bias=True):
-     layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
-     return layer
- 
- 
- def conv_down(in_chn, out_chn, bias=False):
-     layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
-     return layer
- 
- ##########################################################################
- ## Supervised Attention Module (SAM)
- class SAM(nn.Module):
-     def __init__(self, n_feat, kernel_size, bias):
-         super(SAM, self).__init__()
-         self.conv1 = conv(n_feat, n_feat, kernel_size, bias=bias)
-         self.conv2 = conv(n_feat, 3, kernel_size, bias=bias)
-         self.conv3 = conv(3, n_feat, kernel_size, bias=bias)
- 
-     def forward(self, x, x_img):
-         x1 = self.conv1(x)
-         img = self.conv2(x) + x_img
-         x2 = torch.sigmoid(self.conv3(img))
-         x1 = x1 * x2
-         x1 = x1 + x
-         return x1, img
- 
- ##########################################################################
- ## Spatial Attention
- class SALayer(nn.Module):
-     def __init__(self, kernel_size=7):
-         super(SALayer, self).__init__()
-         self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
-         self.sigmoid = nn.Sigmoid()
- 
-     def forward(self, x):
-         avg_out = torch.mean(x, dim=1, keepdim=True)
-         max_out, _ = torch.max(x, dim=1, keepdim=True)
-         y = torch.cat([avg_out, max_out], dim=1)
-         y = self.conv1(y)
-         y = self.sigmoid(y)
-         return x * y
- 
- # Spatial Attention Block (SAB)
- class SAB(nn.Module):
-     def __init__(self, n_feat, kernel_size, reduction, bias, act):
-         super(SAB, self).__init__()
-         modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
-         self.body = nn.Sequential(*modules_body)
-         self.SA = SALayer(kernel_size=7)
- 
-     def forward(self, x):
-         res = self.body(x)
-         res = self.SA(res)
-         res += x
-         return res
- 
- ##########################################################################
- ## Pixel Attention
- class PALayer(nn.Module):
-     def __init__(self, channel, reduction=16, bias=False):
-         super(PALayer, self).__init__()
-         self.pa = nn.Sequential(
-             nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
-             nn.ReLU(inplace=True),
-             nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias),  # channel <-> 1
-             nn.Sigmoid()
-         )
- 
-     def forward(self, x):
-         y = self.pa(x)
-         return x * y
- 
- ## Pixel Attention Block (PAB)
- class PAB(nn.Module):
-     def __init__(self, n_feat, kernel_size, reduction, bias, act):
-         super(PAB, self).__init__()
-         modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
-         self.PA = PALayer(n_feat, reduction, bias=bias)
-         self.body = nn.Sequential(*modules_body)
- 
-     def forward(self, x):
-         res = self.body(x)
-         res = self.PA(res)
-         res += x
-         return res
- 
- ##########################################################################
- ## Channel Attention Layer
- class CALayer(nn.Module):
-     def __init__(self, channel, reduction=16, bias=False):
-         super(CALayer, self).__init__()
-         # global average pooling: feature --> point
-         self.avg_pool = nn.AdaptiveAvgPool2d(1)
-         # feature channel downscale and upscale --> channel weight
-         self.conv_du = nn.Sequential(
-             nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
-             nn.ReLU(inplace=True),
-             nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias),
-             nn.Sigmoid()
-         )
- 
-     def forward(self, x):
-         y = self.avg_pool(x)
-         y = self.conv_du(y)
-         return x * y
- 
- ## Channel Attention Block (CAB)
- class CAB(nn.Module):
-     def __init__(self, n_feat, kernel_size, reduction, bias, act):
-         super(CAB, self).__init__()
-         modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
- 
-         self.CA = CALayer(n_feat, reduction, bias=bias)
-         self.body = nn.Sequential(*modules_body)
- 
-     def forward(self, x):
-         res = self.body(x)
-         res = self.CA(res)
-         res += x
-         return res
- 
- 
- if __name__ == "__main__":
-     import time
-     from thop import profile
-     # layer = CAB(64, 3, 4, False, nn.PReLU())
-     layer = PAB(64, 3, 4, False, nn.PReLU())
-     # layer = SAB(64, 3, 4, False, nn.PReLU())
-     for idx, m in enumerate(layer.modules()):
-         print(idx, "-", m)
-     s = time.time()
- 
-     rgb = torch.ones(1, 64, 256, 256, dtype=torch.float, requires_grad=False)
-     out = layer(rgb)
-     flops, params = profile(layer, inputs=(rgb,))
-     print('parameters:', params)
-     print('flops', flops)
-     print('time: {:.4f}ms'.format((time.time() - s) * 1000))
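The __main__ block above profiles the attention blocks; SAM is exercised slightly differently because it also carries an image branch. A minimal sketch, with tensor shapes chosen purely for illustration:

    import torch

    sam = SAM(n_feat=64, kernel_size=3, bias=False)
    feats = torch.randn(1, 64, 128, 128)   # decoder features
    image = torch.randn(1, 3, 128, 128)    # degraded input image
    feats_out, restored = sam(feats, image)
    print(feats_out.shape, restored.shape)  # [1, 64, 128, 128], [1, 3, 128, 128]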
 
spaces/801artistry/RVC801/i18n.py DELETED
@@ -1,43 +0,0 @@
- import json
- 
- def load_language_list(language):
-     try:
-         with open(f"./i18n/locale/{language}.json", "r", encoding="utf-8") as f:
-             return json.load(f)
-     except FileNotFoundError:
-         raise FileNotFoundError(
-             f"Failed to load language file for {language}. Check if the correct .json file exists."
-         )
- 
- 
- class I18nAuto:
-     """
-     A class used for internationalization using JSON language files.
- 
-     Examples
-     --------
-     >>> i18n = I18nAuto('en_US')
-     >>> i18n.print()
-     Using Language: en_US
-     """
-     def __init__(self, language=None):
-         from locale import getdefaultlocale
-         language = language or getdefaultlocale()[0]
-         if not self._language_exists(language):
-             language = "en_US"
- 
-         self.language_map = load_language_list(language)
-         self.language = language
- 
-     @staticmethod
-     def _language_exists(language):
-         from os.path import exists
-         return exists(f"./i18n/locale/{language}.json")
- 
-     def __call__(self, key):
-         """Returns the translation of the given key if it exists, else returns the key itself."""
-         return self.language_map.get(key, key)
- 
-     def print(self):
-         """Prints the language currently in use."""
-         print(f"Using Language: {self.language}")
 
spaces/A-Celsius/ADR_Predictor/app.py DELETED
@@ -1,103 +0,0 @@
- import joblib
- import gradio as gr
- from datetime import datetime, timedelta, timezone
- 
- model = joblib.load('model.pkl')
- 
- def preprocess_city(selected_city):
-     # Map the selected city to its one-hot encoded representation.
-     # NOTE: assumes a drop-first one-hot encoding over the eight cities,
-     # so Hyderabad (the baseline category) maps to all zeros.
-     city_mapping = {
-         'Hyderabad': [0, 0, 0, 0, 0, 0, 0],
-         'Indore': [1, 0, 0, 0, 0, 0, 0],
-         'Jaipur': [0, 1, 0, 0, 0, 0, 0],
-         'Mahabaleshwar': [0, 0, 1, 0, 0, 0, 0],
-         'Mussoorie': [0, 0, 0, 1, 0, 0, 0],
-         'Raipur': [0, 0, 0, 0, 1, 0, 0],
-         'Udaipur': [0, 0, 0, 0, 0, 1, 0],
-         'Varanasi': [0, 0, 0, 0, 0, 0, 1]
-     }
-     return city_mapping[selected_city]
- 
- def preprocess_date(date_string):
-     # Parse the date string into a datetime object
-     date_obj = datetime.strptime(date_string, '%Y-%m-%d')
-     year = date_obj.year
-     month = date_obj.month
-     day = date_obj.day
-     return year, month, day
- 
- def calculate_lead_time(checkin_date):
-     # Convert input date to datetime object
-     input_date = datetime.strptime(checkin_date, '%Y-%m-%d')
- 
-     # Get current date and time in GMT+5:30 timezone
-     current_date = datetime.now(timezone(timedelta(hours=5, minutes=30)))
- 
-     # Strip the timezone info (input_date is naive), so the two datetimes can be subtracted
-     current_date = current_date.replace(tzinfo=input_date.tzinfo)
- 
-     # Calculate lead time as difference in days
-     lead_time = (input_date - current_date).days
- 
-     return lead_time
- 
- def is_weekend(checkin_date):
-     # Convert input date to datetime object
-     input_date = datetime.strptime(checkin_date, '%Y-%m-%d')
- 
-     # Calculate the day of the week (0=Monday, 6=Sunday)
-     day_of_week = input_date.weekday()
- 
-     # Check if the day is Friday (4) or Saturday (5)
-     return 1 if day_of_week == 4 or day_of_week == 5 else 0
- 
- def predict(selected_city, checkin_date, star_rating, text_rating, season, additional_views, room_category):
-     # Preprocess user input
-     # Here, selected_city is the name of the city selected from the dropdown
-     # checkin_date is the date selected using the text input
-     # star_rating is the selected star rating from the dropdown
-     # text_rating is the numeric rating from the text box
-     # season is the selected option from the radio button (On Season or Off Season)
-     season_binary = 1 if season == 'On Season' else 0
-     # additional_views is the selected option from the radio button (Yes or No)
-     additional_views_binary = 1 if additional_views == 'Yes' else 0
- 
-     room_categories = ["Dorm", "Standard", "Deluxe", "Executive", "Suite"]
-     room_category_number = room_categories.index(room_category)
- 
-     # Preprocess the date
-     year, month, day = preprocess_date(checkin_date)
- 
-     # Preprocess the selected city
-     city_encoded = preprocess_city(selected_city)
- 
-     # Calculate lead time
-     lead_time = calculate_lead_time(checkin_date)
- 
-     # Calculate if the input date is a weekend (1) or weekday (0)
-     is_weekend_value = is_weekend(checkin_date)
- 
-     # Combine all the input features
-     input_data = [star_rating, text_rating, season_binary, day, month, year, is_weekend_value, lead_time, room_category_number, additional_views_binary] + city_encoded
- 
-     # Make predictions using the model
-     prediction = model.predict([input_data])
-     return "{:.2f}".format(prediction[0])
- 
- # Define input components
- city_dropdown = gr.components.Dropdown(choices=['Hyderabad', 'Indore', 'Jaipur', 'Mahabaleshwar', 'Mussoorie', 'Raipur', 'Udaipur', 'Varanasi'], label='Select a City')
- date_input = gr.components.Textbox(label='Check-in Date (YYYY-MM-DD)')
- star_rating_dropdown = gr.components.Dropdown(choices=[1, 2, 3, 4, 5], label='Select Star Rating')
- text_rating_input = gr.components.Number(label='Enter Numeric Rating (1-5)')
- season_radio = gr.components.Radio(['On Season', 'Off Season'], label='Season')
- room_category_dropdown = gr.components.Dropdown(choices=["Dorm", "Standard", "Deluxe", "Executive", "Suite"], label='Select Room Category')
- additional_views_radio = gr.components.Radio(['Yes', 'No'], label='Additional Views')
- 
- # Define output component
- output = gr.components.Textbox(label='Predicted Output')
- 
- # Create the interface
- interface = gr.Interface(fn=predict, inputs=[city_dropdown, date_input, star_rating_dropdown, text_rating_input, season_radio, additional_views_radio, room_category_dropdown], outputs=output, title='Model Prediction Interface')
- 
- # Launch the interface
- interface.launch()
- 
 
spaces/A00001/bingothoo/cloudflare/worker.js DELETED
@@ -1,18 +0,0 @@
- const TARGET_HOST = 'hf4all-bingo.hf.space' // Change this domain to your own; it is shown under Settings > Site domain.
- 
- export default {
-   async fetch(request) {
-     const uri = new URL(request.url);
-     if (uri.protocol === 'http:') {
-       uri.protocol = 'https:';
-       return new Response('', {
-         status: 301,
-         headers: {
-           location: uri.toString(),
-         },
-       })
-     }
-     uri.host = TARGET_HOST
-     return fetch(new Request(uri.toString(), request));
-   },
- };
 
spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/layers_123821KB.py DELETED
@@ -1,118 +0,0 @@
- import torch
- from torch import nn
- import torch.nn.functional as F
- 
- from uvr5_pack.lib_v5 import spec_utils
- 
- 
- class Conv2DBNActiv(nn.Module):
-     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-         super(Conv2DBNActiv, self).__init__()
-         self.conv = nn.Sequential(
-             nn.Conv2d(
-                 nin,
-                 nout,
-                 kernel_size=ksize,
-                 stride=stride,
-                 padding=pad,
-                 dilation=dilation,
-                 bias=False,
-             ),
-             nn.BatchNorm2d(nout),
-             activ(),
-         )
- 
-     def __call__(self, x):
-         return self.conv(x)
- 
- 
- class SeperableConv2DBNActiv(nn.Module):
-     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-         super(SeperableConv2DBNActiv, self).__init__()
-         self.conv = nn.Sequential(
-             nn.Conv2d(
-                 nin,
-                 nin,
-                 kernel_size=ksize,
-                 stride=stride,
-                 padding=pad,
-                 dilation=dilation,
-                 groups=nin,
-                 bias=False,
-             ),
-             nn.Conv2d(nin, nout, kernel_size=1, bias=False),
-             nn.BatchNorm2d(nout),
-             activ(),
-         )
- 
-     def __call__(self, x):
-         return self.conv(x)
- 
- 
- class Encoder(nn.Module):
-     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
-         super(Encoder, self).__init__()
-         self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-         self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
- 
-     def __call__(self, x):
-         skip = self.conv1(x)
-         h = self.conv2(skip)
- 
-         return h, skip
- 
- 
- class Decoder(nn.Module):
-     def __init__(
-         self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
-     ):
-         super(Decoder, self).__init__()
-         self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-         self.dropout = nn.Dropout2d(0.1) if dropout else None
- 
-     def __call__(self, x, skip=None):
-         x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
-         if skip is not None:
-             skip = spec_utils.crop_center(skip, x)
-             x = torch.cat([x, skip], dim=1)
-         h = self.conv(x)
- 
-         if self.dropout is not None:
-             h = self.dropout(h)
- 
-         return h
- 
- 
- class ASPPModule(nn.Module):
-     def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
-         super(ASPPModule, self).__init__()
-         self.conv1 = nn.Sequential(
-             nn.AdaptiveAvgPool2d((1, None)),
-             Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
-         )
-         self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
-         self.conv3 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
-         )
-         self.conv4 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
-         )
-         self.conv5 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-         )
-         self.bottleneck = nn.Sequential(
-             Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
-         )
- 
-     def forward(self, x):
-         _, _, h, w = x.size()
-         feat1 = F.interpolate(
-             self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
-         )
-         feat2 = self.conv2(x)
-         feat3 = self.conv3(x)
-         feat4 = self.conv4(x)
-         feat5 = self.conv5(x)
-         out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
-         bottle = self.bottleneck(out)
-         return bottle
 
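A small standalone shape check for the `ASPPModule` defined in the deleted file above; the channel count and spectrogram size below are illustrative assumptions, not values from the repo:

```python
import torch

# Assumes Conv2DBNActiv, SeperableConv2DBNActiv and ASPPModule from the
# deleted layers_123821KB.py above are importable. Sizes are illustrative.
aspp = ASPPModule(nin=32, nout=64)
x = torch.randn(1, 32, 16, 128)  # (batch, channels, freq bins, time frames)
out = aspp(x)
# Five parallel branches (pooled, 1x1, three dilated separable convs) are
# concatenated to 5 * nin channels, then bottlenecked down to nout.
print(out.shape)  # torch.Size([1, 64, 16, 128])
```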
 
spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/losses.py DELETED
@@ -1,30 +0,0 @@
- import torch
- import torch.nn as nn
-
- class ReConsLoss(nn.Module):
-     def __init__(self, recons_loss, nb_joints):
-         super(ReConsLoss, self).__init__()
-
-         if recons_loss == 'l1':
-             self.Loss = torch.nn.L1Loss()
-         elif recons_loss == 'l2':
-             self.Loss = torch.nn.MSELoss()
-         elif recons_loss == 'l1_smooth':
-             self.Loss = torch.nn.SmoothL1Loss()
-
-         # 4 global motion associated to root
-         # 12 local motion (3 local xyz, 3 vel xyz, 6 rot6d)
-         # 3 global vel xyz
-         # 4 foot contact
-         self.nb_joints = nb_joints
-         self.motion_dim = (nb_joints - 1) * 12 + 4 + 3 + 4
-
-     def forward(self, motion_pred, motion_gt):
-         loss = self.Loss(motion_pred[..., : self.motion_dim], motion_gt[..., : self.motion_dim])
-         return loss
-
-     def forward_vel(self, motion_pred, motion_gt):
-         loss = self.Loss(motion_pred[..., 4 : (self.nb_joints - 1) * 3 + 4], motion_gt[..., 4 : (self.nb_joints - 1) * 3 + 4])
-         return loss
-
 
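As a quick check of the dimension arithmetic in the comments above, a joint count of 22 (the HumanML3D convention; an assumption here, not stated in the deleted file) yields the familiar 263-dimensional motion feature:

```python
# 4 root features + (nb_joints - 1) * 12 local features
# + 3 global velocities + 4 foot-contact labels
nb_joints = 22  # HumanML3D skeleton; illustrative assumption
motion_dim = (nb_joints - 1) * 12 + 4 + 3 + 4
assert motion_dim == 263
```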
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/wavenet.py DELETED
@@ -1,97 +0,0 @@
- import torch
- from torch import nn
-
-
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-     n_channels_int = n_channels[0]
-     in_act = input_a + input_b
-     t_act = torch.tanh(in_act[:, :n_channels_int, :])
-     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-     acts = t_act * s_act
-     return acts
-
-
- class WN(torch.nn.Module):
-     def __init__(self, hidden_size, kernel_size, dilation_rate, n_layers, c_cond=0,
-                  p_dropout=0, share_cond_layers=False, is_BTC=False):
-         super(WN, self).__init__()
-         assert (kernel_size % 2 == 1)
-         assert (hidden_size % 2 == 0)
-         self.is_BTC = is_BTC
-         self.hidden_size = hidden_size
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_layers = n_layers
-         self.gin_channels = c_cond
-         self.p_dropout = p_dropout
-         self.share_cond_layers = share_cond_layers
-
-         self.in_layers = torch.nn.ModuleList()
-         self.res_skip_layers = torch.nn.ModuleList()
-         self.drop = nn.Dropout(p_dropout)
-
-         if c_cond != 0 and not share_cond_layers:
-             cond_layer = torch.nn.Conv1d(c_cond, 2 * hidden_size * n_layers, 1)
-             self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
-         for i in range(n_layers):
-             dilation = dilation_rate ** i
-             padding = int((kernel_size * dilation - dilation) / 2)
-             in_layer = torch.nn.Conv1d(hidden_size, 2 * hidden_size, kernel_size,
-                                        dilation=dilation, padding=padding)
-             in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
-             self.in_layers.append(in_layer)
-
-             # last one is not necessary
-             if i < n_layers - 1:
-                 res_skip_channels = 2 * hidden_size
-             else:
-                 res_skip_channels = hidden_size
-
-             res_skip_layer = torch.nn.Conv1d(hidden_size, res_skip_channels, 1)
-             res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
-             self.res_skip_layers.append(res_skip_layer)
-
-     def forward(self, x, nonpadding=None, cond=None):
-         if self.is_BTC:
-             x = x.transpose(1, 2)
-             cond = cond.transpose(1, 2) if cond is not None else None
-             nonpadding = nonpadding.transpose(1, 2) if nonpadding is not None else None
-         if nonpadding is None:
-             nonpadding = 1
-         output = torch.zeros_like(x)
-         n_channels_tensor = torch.IntTensor([self.hidden_size])
-
-         if cond is not None and not self.share_cond_layers:
-             cond = self.cond_layer(cond)
-
-         for i in range(self.n_layers):
-             x_in = self.in_layers[i](x)
-             x_in = self.drop(x_in)
-             if cond is not None:
-                 cond_offset = i * 2 * self.hidden_size
-                 cond_l = cond[:, cond_offset:cond_offset + 2 * self.hidden_size, :]
-             else:
-                 cond_l = torch.zeros_like(x_in)
-
-             acts = fused_add_tanh_sigmoid_multiply(x_in, cond_l, n_channels_tensor)
-
-             res_skip_acts = self.res_skip_layers[i](acts)
-             if i < self.n_layers - 1:
-                 x = (x + res_skip_acts[:, :self.hidden_size, :]) * nonpadding
-                 output = output + res_skip_acts[:, self.hidden_size:, :]
-             else:
-                 output = output + res_skip_acts
-         output = output * nonpadding
-         if self.is_BTC:
-             output = output.transpose(1, 2)
-         return output
-
-     def remove_weight_norm(self):
-         def remove_weight_norm(m):
-             try:
-                 nn.utils.remove_weight_norm(m)
-             except ValueError:  # this module didn't have weight norm
-                 return
-
-         self.apply(remove_weight_norm)
 
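A minimal usage sketch for the `WN` module above; the hyperparameters and tensor shape are illustrative assumptions (unconditioned, default channel-first layout):

```python
import torch

# Assumes the WN class from the deleted wavenet.py above is importable.
wn = WN(hidden_size=192, kernel_size=5, dilation_rate=1, n_layers=4)
x = torch.randn(2, 192, 100)  # (batch, hidden_size, time) since is_BTC=False
out = wn(x)                   # gated dilated convolutions; shape is preserved
assert out.shape == x.shape
```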
 
spaces/AILab-CVC/SEED-LLaMA/scripts/start_frontend_14b.sh DELETED
@@ -1 +0,0 @@
- python3 gradio_demo/seed_llama_gradio.py --server_port 80 --request_address http://127.0.0.1:7890/generate --model_type seed-llama-14b
 
 
spaces/AIWaves/SOP_Generation-single/Component/PromptComponent.py DELETED
@@ -1,126 +0,0 @@
- from abc import abstractmethod
-
-
- class PromptComponent:
-     def __init__(self):
-         pass
-
-     @abstractmethod
-     def get_prompt(self, agent):
-         pass
-
-
- class TaskComponent(PromptComponent):
-     def __init__(self, task):
-         super().__init__()
-         self.task = task
-
-     def get_prompt(self, agent):
-         return f"""The task you need to execute is: {self.task}.\n"""
-
-
- class OutputComponent(PromptComponent):
-     def __init__(self, output):
-         super().__init__()
-         self.output = output
-
-     def get_prompt(self, agent):
-         return f"""Please contact the above to extract <{self.output}> and </{self.output}>, \
- do not perform additional output, please output in strict accordance with the above format!\n"""
-
-
- class SystemComponent(PromptComponent):
-     def __init__(self, system_prompt):
-         super().__init__()
-         self.system_prompt = system_prompt
-
-     def get_prompt(self, agent):
-         return self.system_prompt
-
-
- class LastComponent(PromptComponent):
-     def __init__(self, last_prompt):
-         super().__init__()
-         self.last_prompt = last_prompt
-
-     def get_prompt(self, agent):
-         return self.last_prompt
-
-
- class StyleComponent(PromptComponent):
-     """
-     Role and output-style component.
-     """
-
-     def __init__(self, role):
-         super().__init__()
-         self.role = role
-
-     def get_prompt(self, agent):
-         name = agent.name
-         style = agent.style
-         return f"""Now your role is:\n{self.role}, your name is:\n{name}. \
- You need to follow the output style:\n{style}.\n"""
-
-
- class RuleComponent(PromptComponent):
-     def __init__(self, rule):
-         super().__init__()
-         self.rule = rule
-
-     def get_prompt(self, agent):
-         return f"""The rule you need to follow is:\n{self.rule}.\n"""
-
-
- class DemonstrationComponent(PromptComponent):
-     """
-     Takes a list of example answers (demonstrations).
-     """
-
-     def __init__(self, demonstrations):
-         super().__init__()
-         self.demonstrations = demonstrations
-
-     def get_prompt(self, agent):
-         prompt = f"Here are demonstrations you can refer to:\n{self.demonstrations}"
-         return prompt
-
-
- class CoTComponent(PromptComponent):
-     """
-     Takes a list of example chains of thought.
-     """
-
-     def __init__(self, demonstrations):
-         super().__init__()
-         self.demonstrations = demonstrations
-
-     def add_demonstration(self, demonstration):
-         self.demonstrations.append(demonstration)
-
-     def get_prompt(self, agent):
-         prompt = "You need to think in detail before outputting, the thinking case is as follows:\n"
-         for demonstration in self.demonstrations:
-             prompt += "\n" + demonstration
-         return prompt
-
-
- class CustomizeComponent(PromptComponent):
-     """
-     Custom template component.
-     template (str): e.g. "i am {name}"
-     keywords (list): e.g. ["name"]
-     Example: with agent.environment.shared_memory["name"] = "Lilong",
-     the component looks up each keyword in the environment's shared
-     memory and substitutes it into the template, returning "i am Lilong".
-     """
-     def __init__(self, template, keywords) -> None:
-         super().__init__()
-         self.template = template
-         self.keywords = keywords
-
-     def get_prompt(self, agent):
-         template_keyword = {}
-         for keyword in self.keywords:
-             current_keyword = agent.environment.shared_memory[keyword] if keyword in agent.environment.shared_memory else ""
-             template_keyword[keyword] = current_keyword
-         return self.template.format(**template_keyword)
 
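A minimal sketch of how `CustomizeComponent` resolves its template, assuming the class above is importable; the `_Agent`/`_Env` stubs below only mimic the attributes `get_prompt` reads and are not the project's real classes:

```python
class _Env:
    # Stub mimicking agent.environment.shared_memory
    shared_memory = {"name": "Lilong"}


class _Agent:
    environment = _Env()


component = CustomizeComponent(template="i am {name}", keywords=["name"])
print(component.get_prompt(_Agent()))  # -> "i am Lilong"
```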
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/unfinished/Komo.py DELETED
@@ -1,44 +0,0 @@
- from __future__ import annotations
-
- import json
-
- from ...requests import StreamSession
- from ...typing import AsyncGenerator
- from ..base_provider import AsyncGeneratorProvider, format_prompt
-
-
- class Komo(AsyncGeneratorProvider):
-     url = "https://komo.ai/api/ask"
-     supports_gpt_35_turbo = True
-
-     @classmethod
-     async def create_async_generator(
-         cls,
-         model: str,
-         messages: list[dict[str, str]],
-         **kwargs
-     ) -> AsyncGenerator:
-         async with StreamSession(impersonate="chrome107") as session:
-             prompt = format_prompt(messages)
-             data = {
-                 "query": prompt,
-                 "FLAG_URLEXTRACT": "false",
-                 "token": "",
-                 "FLAG_MODELA": "1",
-             }
-             headers = {
-                 'authority': 'komo.ai',
-                 'accept': 'text/event-stream',
-                 'cache-control': 'no-cache',
-                 'referer': 'https://komo.ai/',
-             }
-
-             async with session.get(cls.url, params=data, headers=headers) as response:
-                 response.raise_for_status()
-                 # Server-sent events: an "event: line" marker announces that
-                 # the following "data:" line carries a JSON-encoded text chunk.
-                 expect_data = False
-                 async for line in response.iter_lines():
-                     if line == b"event: line":
-                         expect_data = True
-                     elif expect_data and line.startswith(b"data: "):
-                         yield json.loads(line[6:])
-                         expect_data = False
-
 
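The streaming loop above is a small server-sent-events parser: an `event: line` marker says the next `data:` line carries a JSON chunk. A standalone illustration of the same pairing logic on canned bytes:

```python
import json

lines = [b"event: line", b'data: "Hello"', b"event: done"]
expect_data = False
for line in lines:
    if line == b"event: line":
        expect_data = True
    elif expect_data and line.startswith(b"data: "):
        print(json.loads(line[6:]))  # -> Hello
        expect_data = False
```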
spaces/AgentVerse/agentVerse/scripts/evaluate_math.py DELETED
@@ -1,93 +0,0 @@
- import re
- import json
- import subprocess
- from importlib import reload
- from argparse import ArgumentParser
-
- parser = ArgumentParser()
- parser.add_argument("--path", type=str, required=True)
- parser.add_argument("--max_line", type=int, default=1000000000000)
- parser.add_argument("--ci_smoke_test", action="store_true")
- args = parser.parse_args()
-
-
- def check_corr(result: str, correct_solution: str, tol: float = 1e-3):
-     result = result.replace(",", "")
-     if result.strip() == correct_solution.strip():
-         return 1
-     try:
-         result = float(result.strip())
-         correct_solution = float(correct_solution.strip())
-         return abs(result - correct_solution) < tol
-     except Exception:
-         return 0
-
-
- # final_accs = []
- # for i in range(2):
- #     acc = 0
- #     total = 0
- #     with open(args.path) as f:
- #         for line in f:
- #             line = json.loads(line)
- #             label = str(line["label"])
- #             if i == 0:
- #                 code = line["response"]
- #             else:
- #                 code = line["logs"][0]["content"]
- #             total += 1
- #             code = code.strip().replace("```", "")
- #             code = code.lstrip("python3")
- #             code = code.lstrip("python")
- #             with open("tmp.py", "w") as f:
- #                 f.write(code)
-
- #             try:
- #                 import tmp
-
- #                 reload(tmp)
- #                 result = str(tmp.solution())
- #                 is_corr = check_corr(result, label)
-
- #                 is_corr = int(is_corr)
- #                 # Step 2
- #                 if is_corr:
- #                     acc += 1
- #             except:
- #                 print(code)
- #     final_accs.append(acc / total)
- # print(final_accs)
-
- final_accs = []
- err_cnts = []
- for i in range(2):
-     acc = 0
-     total = 0
-     err_cnt = 0
-     with open(args.path) as f:
-         for idx, line in enumerate(f):
-             if idx == args.max_line:
-                 break
-             line = json.loads(line)
-             label = str(line["label"])
-             if i == 0:
-                 response = line["response"]
-             else:
-                 if line["logs"][0]["module"] == "Role Assigner":
-                     response = line["logs"][1]["content"]
-                 else:
-                     response = line["logs"][0]["content"]
-             total += 1
-             result = re.findall(r"\\boxed\{(.+?)\}", response)
-             if len(result) == 0:
-                 err_cnt += 1
-                 print(response)
-                 continue
-             result = result[0]
-             acc += check_corr(result, label)
-     final_accs.append(acc / total)
-     err_cnts.append(err_cnt)
- print(final_accs)
- print(err_cnts)
- if args.ci_smoke_test is True:
-     assert final_accs[0] == 1.0
 
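A quick illustration of the `\boxed{...}` answer extraction used in the loop above, on a made-up response string:

```python
import re

response = r"The computation gives \boxed{42} as the final answer."
result = re.findall(r"\\boxed\{(.+?)\}", response)
print(result)  # ['42']
```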
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/clock/Factory.js DELETED
@@ -1,13 +0,0 @@
- import Clock from './Clock.js';
- import ObjectFactory from '../ObjectFactory.js';
- import SetValue from '../../../plugins/utils/object/SetValue.js';
-
- ObjectFactory.register('clock', function (config) {
-     var gameObject = new Clock(this.scene, config);
-     this.scene.add.existing(gameObject);
-     return gameObject;
- });
-
- SetValue(window, 'RexPlugins.Spinner.Clock', Clock);
-
- export default Clock;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetExpandedChildWidth.js DELETED
@@ -1,6 +0,0 @@
- // Override
- var GetExpandedChildWidth = function (child, parentWidth) {
-     return parentWidth;
- }
-
- export default GetExpandedChildWidth;
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/kandinsky_v22.md DELETED
@@ -1,357 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # Kandinsky 2.2
-
- The Kandinsky 2.2 release includes robust new text-to-image models that support text-to-image generation, image-to-image generation, image interpolation, and text-guided image inpainting. The general workflow to perform these tasks using Kandinsky 2.2 is the same as in Kandinsky 2.1. First, you will need to use a prior pipeline to generate image embeddings based on your text prompt, and then use one of the image decoding pipelines to generate the output image. The only difference is that in Kandinsky 2.2, all of the decoding pipelines no longer accept the `prompt` input, and the image generation process is conditioned with only `image_embeds` and `negative_image_embeds`.
-
- Same as with Kandinsky 2.1, the easiest way to perform text-to-image generation is to use the combined Kandinsky pipeline. This process is exactly the same as Kandinsky 2.1. All you need to do is to replace the Kandinsky 2.1 checkpoint with 2.2.
-
- ```python
- from diffusers import AutoPipelineForText2Image
- import torch
-
- pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
- pipe.enable_model_cpu_offload()
-
- prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting"
- negative_prompt = "low quality, bad quality"
-
- image = pipe(prompt=prompt, negative_prompt=negative_prompt, prior_guidance_scale=1.0, height=768, width=768).images[0]
- ```
-
- Now, let's look at an example where we take separate steps to run the prior pipeline and text-to-image pipeline. This way, we can understand what's happening under the hood and how Kandinsky 2.2 differs from Kandinsky 2.1.
-
- First, let's create the prior pipeline and text-to-image pipeline with Kandinsky 2.2 checkpoints.
-
- ```python
- from diffusers import DiffusionPipeline
- import torch
-
- pipe_prior = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
- pipe_prior.to("cuda")
-
- t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
- t2i_pipe.to("cuda")
- ```
-
- You can then use `pipe_prior` to generate image embeddings.
-
- ```python
- prompt = "portrait of a women, blue eyes, cinematic"
- negative_prompt = "low quality, bad quality"
-
- image_embeds, negative_image_embeds = pipe_prior(prompt, guidance_scale=1.0).to_tuple()
- ```
-
- Now you can pass these embeddings to the text-to-image pipeline. When using Kandinsky 2.2 you don't need to pass the `prompt` (but you do with the previous version, Kandinsky 2.1).
-
- ```python
- image = t2i_pipe(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768).images[
-     0
- ]
- image.save("portrait.png")
- ```
- ![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/%20blue%20eyes.png)
-
- We used the text-to-image pipeline as an example, but the same process applies to all decoding pipelines in Kandinsky 2.2. For more information, please refer to our API section for each pipeline.
-
- ### Text-to-Image Generation with ControlNet Conditioning
-
- In the following, we give a simple example of how to use [`KandinskyV22ControlnetPipeline`] to add control to the text-to-image generation with a depth image.
-
- First, let's take an image and extract its depth map.
-
- ```python
- from diffusers.utils import load_image
-
- img = load_image(
-     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png"
- ).resize((768, 768))
- ```
- ![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png)
-
- We can use the `depth-estimation` pipeline from transformers to process the image and retrieve its depth map.
-
- ```python
- import torch
- import numpy as np
-
- from transformers import pipeline
- from diffusers.utils import load_image
-
-
- def make_hint(image, depth_estimator):
-     image = depth_estimator(image)["depth"]
-     image = np.array(image)
-     image = image[:, :, None]
-     image = np.concatenate([image, image, image], axis=2)
-     detected_map = torch.from_numpy(image).float() / 255.0
-     hint = detected_map.permute(2, 0, 1)
-     return hint
-
-
- depth_estimator = pipeline("depth-estimation")
- hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
- ```
- Now, we load the prior pipeline and the text-to-image controlnet pipeline.
-
- ```python
- from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
-
- pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
-     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
- )
- pipe_prior = pipe_prior.to("cuda")
-
- pipe = KandinskyV22ControlnetPipeline.from_pretrained(
-     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
- )
- pipe = pipe.to("cuda")
- ```
-
- We pass the prompt and negative prompt through the prior to generate image embeddings.
-
- ```python
- prompt = "A robot, 4k photo"
-
- negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
-
- generator = torch.Generator(device="cuda").manual_seed(43)
- image_emb, zero_image_emb = pipe_prior(
-     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
- ).to_tuple()
- ```
-
- Now we can pass the image embeddings and the depth image we extracted to the controlnet pipeline. With Kandinsky 2.2, only prior pipelines accept `prompt` input. You do not need to pass the prompt to the controlnet pipeline.
-
- ```python
- images = pipe(
-     image_embeds=image_emb,
-     negative_image_embeds=zero_image_emb,
-     hint=hint,
-     num_inference_steps=50,
-     generator=generator,
-     height=768,
-     width=768,
- ).images
-
- images[0].save("robot_cat.png")
- ```
-
- The output image looks as follows:
- ![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/robot_cat_text2img.png)
-
- ### Image-to-Image Generation with ControlNet Conditioning
-
- Kandinsky 2.2 also includes a [`KandinskyV22ControlnetImg2ImgPipeline`] that will allow you to add control to the image generation process with both the image and its depth map. This pipeline works really well with [`KandinskyV22PriorEmb2EmbPipeline`], which generates image embeddings based on both a text prompt and an image.
-
- For our robot cat example, we will pass the prompt and cat image together to the prior pipeline to generate an image embedding. We will then use that image embedding and the depth map of the cat to further control the image generation process.
-
- We can use the same cat image and its depth map from the last example.
-
- ```python
- import torch
- import numpy as np
-
- from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline
- from diffusers.utils import load_image
- from transformers import pipeline
-
- img = load_image(
-     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/cat.png"
- ).resize((768, 768))
-
-
- def make_hint(image, depth_estimator):
-     image = depth_estimator(image)["depth"]
-     image = np.array(image)
-     image = image[:, :, None]
-     image = np.concatenate([image, image, image], axis=2)
-     detected_map = torch.from_numpy(image).float() / 255.0
-     hint = detected_map.permute(2, 0, 1)
-     return hint
-
-
- depth_estimator = pipeline("depth-estimation")
- hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
-
- pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
-     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
- )
- pipe_prior = pipe_prior.to("cuda")
-
- pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
-     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
- )
- pipe = pipe.to("cuda")
-
- prompt = "A robot, 4k photo"
- negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
-
- generator = torch.Generator(device="cuda").manual_seed(43)
-
- # run prior pipeline
-
- img_emb = pipe_prior(prompt=prompt, image=img, strength=0.85, generator=generator)
- negative_emb = pipe_prior(prompt=negative_prior_prompt, image=img, strength=1, generator=generator)
-
- # run controlnet img2img pipeline
- images = pipe(
-     image=img,
-     strength=0.5,
-     image_embeds=img_emb.image_embeds,
-     negative_image_embeds=negative_emb.image_embeds,
-     hint=hint,
-     num_inference_steps=50,
-     generator=generator,
-     height=768,
-     width=768,
- ).images
-
- images[0].save("robot_cat.png")
- ```
-
- Here is the output. Compared with the output from our text-to-image controlnet example, it keeps far more of the cat's facial details from the original image while still working in the robot style we asked for.
-
- ![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/robot_cat.png)
-
- ## Optimization
-
- Running Kandinsky at inference requires a first prior pipeline, [`KandinskyPriorPipeline`],
- followed by a second image decoding pipeline, one of [`KandinskyPipeline`], [`KandinskyImg2ImgPipeline`], or [`KandinskyInpaintPipeline`].
-
- The bulk of the computation time is always spent in the second image decoding pipeline, so that is
- where any optimization effort should focus.
-
- When running with PyTorch < 2.0, we strongly recommend making use of [`xformers`](https://github.com/facebookresearch/xformers)
- to speed up inference. This can be done by simply running:
-
- ```py
- from diffusers import DiffusionPipeline
- import torch
-
- t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
- t2i_pipe.enable_xformers_memory_efficient_attention()
- ```
-
- When running on PyTorch >= 2.0, PyTorch's SDPA attention will automatically be used. For more information on
- PyTorch's SDPA, feel free to have a look at [this blog post](https://pytorch.org/blog/accelerated-diffusers-pt-20/).
-
- To have explicit control, you can also manually set the pipeline to use PyTorch 2.0's efficient attention:
-
- ```py
- from diffusers.models.attention_processor import AttnAddedKVProcessor2_0
-
- t2i_pipe.unet.set_attn_processor(AttnAddedKVProcessor2_0())
- ```
-
- The slowest and most memory-intensive attention processor is the default `AttnAddedKVProcessor`.
- We do **not** recommend using it except for testing purposes or cases where fully deterministic behaviour is desired.
- You can set it with:
-
- ```py
- from diffusers.models.attention_processor import AttnAddedKVProcessor
-
- t2i_pipe.unet.set_attn_processor(AttnAddedKVProcessor())
- ```
-
- With PyTorch >= 2.0, you can also use Kandinsky with `torch.compile`, which, depending
- on your hardware, can significantly speed up your inference time once the model is compiled.
- To use Kandinsky with `torch.compile`, you can do:
-
- ```py
- t2i_pipe.unet.to(memory_format=torch.channels_last)
- t2i_pipe.unet = torch.compile(t2i_pipe.unet, mode="reduce-overhead", fullgraph=True)
- ```
-
- After compilation you should see a very fast inference time. For more information,
- feel free to have a look at [our PyTorch 2.0 benchmark](https://huggingface.co/docs/diffusers/main/en/optimization/torch2.0).
-
- <Tip>
-
- To generate images directly from a single pipeline, you can use [`KandinskyV22CombinedPipeline`], [`KandinskyV22Img2ImgCombinedPipeline`], [`KandinskyV22InpaintCombinedPipeline`].
- These combined pipelines wrap the [`KandinskyV22PriorPipeline`] and [`KandinskyV22Pipeline`], [`KandinskyV22Img2ImgPipeline`], [`KandinskyV22InpaintPipeline`] respectively into a single
- pipeline for a simpler user experience.
-
- </Tip>
-
- ## Available Pipelines:
-
- | Pipeline | Tasks |
- |---|---|
- | [pipeline_kandinsky2_2.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py) | *Text-to-Image Generation* |
- | [pipeline_kandinsky2_2_combined.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py) | *End-to-end Text-to-Image, Image-to-Image, and Inpainting Generation* |
- | [pipeline_kandinsky2_2_inpaint.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py) | *Image-Guided Image Generation* |
- | [pipeline_kandinsky2_2_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py) | *Image-Guided Image Generation* |
- | [pipeline_kandinsky2_2_controlnet.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py) | *Image-Guided Image Generation* |
- | [pipeline_kandinsky2_2_controlnet_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py) | *Image-Guided Image Generation* |
-
-
- ### KandinskyV22Pipeline
-
- [[autodoc]] KandinskyV22Pipeline
- - all
- - __call__
-
- ### KandinskyV22ControlnetPipeline
-
- [[autodoc]] KandinskyV22ControlnetPipeline
- - all
- - __call__
-
- ### KandinskyV22ControlnetImg2ImgPipeline
-
- [[autodoc]] KandinskyV22ControlnetImg2ImgPipeline
- - all
- - __call__
-
- ### KandinskyV22Img2ImgPipeline
-
- [[autodoc]] KandinskyV22Img2ImgPipeline
- - all
- - __call__
-
- ### KandinskyV22InpaintPipeline
-
- [[autodoc]] KandinskyV22InpaintPipeline
- - all
- - __call__
-
- ### KandinskyV22PriorPipeline
-
- [[autodoc]] KandinskyV22PriorPipeline
- - all
- - __call__
- - interpolate
-
- ### KandinskyV22PriorEmb2EmbPipeline
-
- [[autodoc]] KandinskyV22PriorEmb2EmbPipeline
- - all
- - __call__
- - interpolate
-
- ### KandinskyV22CombinedPipeline
-
- [[autodoc]] KandinskyV22CombinedPipeline
- - all
- - __call__
-
- ### KandinskyV22Img2ImgCombinedPipeline
-
- [[autodoc]] KandinskyV22Img2ImgCombinedPipeline
- - all
- - __call__
-
- ### KandinskyV22InpaintCombinedPipeline
-
- [[autodoc]] KandinskyV22InpaintCombinedPipeline
- - all
- - __call__
 
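The Tip in the deleted doc above mentions the combined pipelines without showing one; a minimal sketch, assuming the checkpoint name from the doc and that loading the combined class directly also pulls in the prior components:

```python
import torch
from diffusers import KandinskyV22CombinedPipeline

# One pipeline wraps the prior and the decoder together.
pipe = KandinskyV22CombinedPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

image = pipe(prompt="red cat, 4k photo", height=768, width=768).images[0]
image.save("red_cat.png")
```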
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/wildcard_stable_diffusion.py DELETED
@@ -1,418 +0,0 @@
- import inspect
- import os
- import random
- import re
- from dataclasses import dataclass
- from typing import Callable, Dict, List, Optional, Union
-
- import torch
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
-
- from diffusers import DiffusionPipeline
- from diffusers.configuration_utils import FrozenDict
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
- from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
- from diffusers.utils import deprecate, logging
-
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
- global_re_wildcard = re.compile(r"__([^_]*)__")
-
-
- def get_filename(path: str):
-     # this doesn't work on Windows
-     return os.path.basename(path).split(".txt")[0]
-
-
- def read_wildcard_values(path: str):
-     with open(path, encoding="utf8") as f:
-         return f.read().splitlines()
-
-
- def grab_wildcard_values(wildcard_option_dict: Dict[str, List[str]] = {}, wildcard_files: List[str] = []):
-     for wildcard_file in wildcard_files:
-         filename = get_filename(wildcard_file)
-         read_values = read_wildcard_values(wildcard_file)
-         if filename not in wildcard_option_dict:
-             wildcard_option_dict[filename] = []
-         wildcard_option_dict[filename].extend(read_values)
-     return wildcard_option_dict
-
-
- def replace_prompt_with_wildcards(
-     prompt: str, wildcard_option_dict: Dict[str, List[str]] = {}, wildcard_files: List[str] = []
- ):
-     new_prompt = prompt
-
-     # get wildcard options
-     wildcard_option_dict = grab_wildcard_values(wildcard_option_dict, wildcard_files)
-
-     for m in global_re_wildcard.finditer(new_prompt):
-         wildcard_value = m.group()
-         replace_value = random.choice(wildcard_option_dict[wildcard_value.strip("__")])
-         new_prompt = new_prompt.replace(wildcard_value, replace_value, 1)
-
-     return new_prompt
-
-
- @dataclass
- class WildcardStableDiffusionOutput(StableDiffusionPipelineOutput):
-     prompts: List[str]
-
-
- class WildcardStableDiffusionPipeline(DiffusionPipeline):
-     r"""
-     Example Usage:
-         pipe = WildcardStableDiffusionPipeline.from_pretrained(
-             "CompVis/stable-diffusion-v1-4",
-             torch_dtype=torch.float16,
-         )
-         prompt = "__animal__ sitting on a __object__ wearing a __clothing__"
-         out = pipe(
-             prompt,
-             wildcard_option_dict={
-                 "clothing": ["hat", "shirt", "scarf", "beret"]
-             },
-             wildcard_files=["object.txt", "animal.txt"],
-             num_prompt_samples=1
-         )
-
-     Pipeline for text-to-image generation with wildcards using Stable Diffusion.
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Args:
-         vae ([`AutoencoderKL`]):
-             Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-         text_encoder ([`CLIPTextModel`]):
-             Frozen text-encoder. Stable Diffusion uses the text portion of
-             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-         tokenizer (`CLIPTokenizer`):
-             Tokenizer of class
-             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-         safety_checker ([`StableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
-         feature_extractor ([`CLIPImageProcessor`]):
-             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
-     """
-
-     def __init__(
-         self,
-         vae: AutoencoderKL,
-         text_encoder: CLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
-         safety_checker: StableDiffusionSafetyChecker,
-         feature_extractor: CLIPImageProcessor,
-     ):
-         super().__init__()
-
-         if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
-                 f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-                 "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
-                 " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
-                 " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
-                 " file"
-             )
-             deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["steps_offset"] = 1
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if safety_checker is None:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                 " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
-                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-
-     @torch.no_grad()
-     def __call__(
-         self,
-         prompt: Union[str, List[str]],
-         height: int = 512,
-         width: int = 512,
-         num_inference_steps: int = 50,
-         guidance_scale: float = 7.5,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: float = 0.0,
-         generator: Optional[torch.Generator] = None,
-         latents: Optional[torch.FloatTensor] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-         callback_steps: int = 1,
-         wildcard_option_dict: Dict[str, List[str]] = {},
-         wildcard_files: List[str] = [],
-         num_prompt_samples: Optional[int] = 1,
-         **kwargs,
-     ):
-         r"""
-         Function invoked when calling the pipeline for generation.
-
-         Args:
-             prompt (`str` or `List[str]`):
-                 The prompt or prompts to guide the image generation.
-             height (`int`, *optional*, defaults to 512):
-                 The height in pixels of the generated image.
-             width (`int`, *optional*, defaults to 512):
-                 The width in pixels of the generated image.
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             guidance_scale (`float`, *optional*, defaults to 7.5):
-                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
-                 usually at the expense of lower image quality.
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-                 if `guidance_scale` is less than `1`).
-             num_images_per_prompt (`int`, *optional*, defaults to 1):
-                 The number of images to generate per prompt.
-             eta (`float`, *optional*, defaults to 0.0):
-                 Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
-                 [`schedulers.DDIMScheduler`], will be ignored for others.
-             generator (`torch.Generator`, *optional*):
-                 A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
-                 deterministic.
-             latents (`torch.FloatTensor`, *optional*):
-                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
-                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                 tensor will be generated by sampling using the supplied random `generator`.
-             output_type (`str`, *optional*, defaults to `"pil"`):
-                 The output format of the generated image. Choose between
-                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
-                 plain tuple.
-             callback (`Callable`, *optional*):
-                 A function that will be called every `callback_steps` steps during inference. The function will be
-                 called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-             callback_steps (`int`, *optional*, defaults to 1):
-                 The frequency at which the `callback` function will be called. If not specified, the callback will be
-                 called at every step.
-             wildcard_option_dict (Dict[str, List[str]]):
-                 dict with a wildcard as key and a list of possible replacements as values. For example, for the prompt
-                 "A __animal__ sitting on a chair", a wildcard_option_dict can provide possible values for "animal"
-                 like this: {"animal": ["dog", "cat", "fox"]}
-             wildcard_files: (List[str])
-                 List of filenames of txt files for wildcard replacements. For example, for the prompt
-                 "A __animal__ sitting on a chair", a file can be provided: ["animal.txt"]
-             num_prompt_samples: int
-                 Number of times to sample wildcards for each prompt provided
-
-         Returns:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
-             When returning a tuple, the first element is a list with the generated images, and the second element is a
-             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
-             (nsfw) content, according to the `safety_checker`.
-         """
-
-         if isinstance(prompt, str):
-             prompt = [
-                 replace_prompt_with_wildcards(prompt, wildcard_option_dict, wildcard_files)
-                 for i in range(num_prompt_samples)
-             ]
-             batch_size = len(prompt)
-         elif isinstance(prompt, list):
-             prompt_list = []
-             for p in prompt:
-                 for i in range(num_prompt_samples):
-                     prompt_list.append(replace_prompt_with_wildcards(p, wildcard_option_dict, wildcard_files))
-             prompt = prompt_list
-             batch_size = len(prompt)
-         else:
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         if height % 8 != 0 or width % 8 != 0:
-             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-         if (callback_steps is None) or (
-             callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
-         ):
-             raise ValueError(
-                 f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
-                 f" {type(callback_steps)}."
-             )
-
-         # get prompt text embeddings
-         text_inputs = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=self.tokenizer.model_max_length,
-             return_tensors="pt",
-         )
-         text_input_ids = text_inputs.input_ids
-
-         if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
-             removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
-             logger.warning(
-                 "The following part of your input was truncated because CLIP can only handle sequences up to"
-                 f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-             )
-             text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
-         text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
-
-         # duplicate text embeddings for each generation per prompt, using mps friendly method
-         bs_embed, seq_len, _ = text_embeddings.shape
-         text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
-         text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
-         # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-         # corresponds to doing no classifier free guidance.
-         do_classifier_free_guidance = guidance_scale > 1.0
-         # get unconditional embeddings for classifier free guidance
-         if do_classifier_free_guidance:
-             uncond_tokens: List[str]
-             if negative_prompt is None:
-                 uncond_tokens = [""] * batch_size
-             elif type(prompt) is not type(negative_prompt):
-                 raise TypeError(
-                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
-                     f" {type(prompt)}."
-                 )
-             elif isinstance(negative_prompt, str):
-                 uncond_tokens = [negative_prompt]
-             elif batch_size != len(negative_prompt):
-                 raise ValueError(
-                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                     " the batch size of `prompt`."
-                 )
-             else:
-                 uncond_tokens = negative_prompt
-
-             max_length = text_input_ids.shape[-1]
-             uncond_input = self.tokenizer(
-                 uncond_tokens,
-                 padding="max_length",
-                 max_length=max_length,
-                 truncation=True,
-                 return_tensors="pt",
-             )
-             uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
-
-             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-             seq_len = uncond_embeddings.shape[1]
-             uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
-             uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
-
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
-         # get the initial random noise unless the user supplied it
-
-         # Unlike in other pipelines, latents need to be generated in the target device
-         # for 1-to-1 results reproducibility with the CompVis implementation.
-         # However this currently doesn't work in `mps`.
-         latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
-         latents_dtype = text_embeddings.dtype
-         if latents is None:
-             if self.device.type == "mps":
-                 # randn does not exist on mps
-                 latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
-                     self.device
-                 )
-             else:
-                 latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
-         else:
-             if latents.shape != latents_shape:
-                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
-             latents = latents.to(self.device)
-
-         # set timesteps
-         self.scheduler.set_timesteps(num_inference_steps)
-
-         # Some schedulers like PNDM have timesteps as arrays
-         # It's more optimized to move all timesteps to correct device beforehand
-         timesteps_tensor = self.scheduler.timesteps.to(self.device)
-
-         # scale the initial noise by the standard deviation required by the scheduler
-         latents = latents * self.scheduler.init_noise_sigma
-
-         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-         # and should be between [0, 1]
-         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         extra_step_kwargs = {}
-         if accepts_eta:
-             extra_step_kwargs["eta"] = eta
-
-         for i, t in enumerate(self.progress_bar(timesteps_tensor)):
-             # expand the latents if we are doing classifier free guidance
-             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-             latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
-             # predict the noise residual
-             noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
-
-             # perform guidance
-             if do_classifier_free_guidance:
-                 noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-             # compute the previous noisy sample x_t -> x_t-1
-             latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
-             # call the callback, if provided
-             if callback is not None and i % callback_steps == 0:
-                 callback(i, t, latents)
-
-         latents = 1 / 0.18215 * latents
-         image = self.vae.decode(latents).sample
-
-         image = (image / 2 + 0.5).clamp(0, 1)
-
-         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
-         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-
-         if self.safety_checker is not None:
-             safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
-                 self.device
-             )
-             image, has_nsfw_concept = self.safety_checker(
-                 images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
-             )
-         else:
-             has_nsfw_concept = None
-
-         if output_type == "pil":
-             image = self.numpy_to_pil(image)
-
-         if not return_dict:
-             return (image, has_nsfw_concept)
-
-         return WildcardStableDiffusionOutput(images=image, nsfw_content_detected=has_nsfw_concept, prompts=prompt)
 
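A tiny illustration of the `replace_prompt_with_wildcards` helper defined in the deleted file above; the option dict and seed are illustrative:

```python
import random

random.seed(0)  # only to make the random choices reproducible here
prompt = "a __animal__ wearing a __clothing__"
options = {"animal": ["fox", "cat"], "clothing": ["scarf", "hat"]}
print(replace_prompt_with_wildcards(prompt, options))
# e.g. "a cat wearing a scarf" -- each __token__ gets a random replacement
```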
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py DELETED
@@ -1,429 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from typing import Callable, List, Optional, Union
16
-
17
- import torch
18
- from transformers import (
19
- XLMRobertaTokenizer,
20
- )
21
-
22
- from ...models import UNet2DConditionModel, VQModel
23
- from ...schedulers import DDIMScheduler, DDPMScheduler
24
- from ...utils import (
25
- is_accelerate_available,
26
- is_accelerate_version,
27
- logging,
28
- randn_tensor,
29
- replace_example_docstring,
30
- )
31
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
32
- from .text_encoder import MultilingualCLIP
33
-
34
-
35
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
-
37
- EXAMPLE_DOC_STRING = """
38
- Examples:
39
- ```py
40
- >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
41
- >>> import torch
42
-
43
- >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
44
- >>> pipe_prior.to("cuda")
45
-
46
- >>> prompt = "red cat, 4k photo"
47
- >>> out = pipe_prior(prompt)
48
- >>> image_emb = out.image_embeds
49
- >>> negative_image_emb = out.negative_image_embeds
50
-
51
- >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
52
- >>> pipe.to("cuda")
53
-
54
- >>> image = pipe(
55
- ... prompt,
56
- ... image_embeds=image_emb,
57
- ... negative_image_embeds=negative_image_emb,
58
- ... height=768,
59
- ... width=768,
60
- ... num_inference_steps=100,
61
- ... ).images
62
-
63
- >>> image[0].save("cat.png")
64
- ```
65
- """
66
-
67
-
68
- def get_new_h_w(h, w, scale_factor=8):
69
- new_h = h // scale_factor**2
70
- if h % scale_factor**2 != 0:
71
- new_h += 1
72
- new_w = w // scale_factor**2
73
- if w % scale_factor**2 != 0:
74
- new_w += 1
75
- return new_h * scale_factor, new_w * scale_factor
76
-
77
-
78
- class KandinskyPipeline(DiffusionPipeline):
79
- """
80
- Pipeline for text-to-image generation using Kandinsky
81
-
82
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
83
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
84
-
85
- Args:
86
- text_encoder ([`MultilingualCLIP`]):
87
- Frozen text-encoder.
88
- tokenizer ([`XLMRobertaTokenizer`]):
89
- Tokenizer of class
90
- scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
91
- A scheduler to be used in combination with `unet` to generate image latents.
92
- unet ([`UNet2DConditionModel`]):
93
- Conditional U-Net architecture to denoise the image embedding.
94
- movq ([`VQModel`]):
95
- MoVQ Decoder to generate the image from the latents.
96
- """
97
-
98
- def __init__(
99
- self,
100
- text_encoder: MultilingualCLIP,
101
- tokenizer: XLMRobertaTokenizer,
102
- unet: UNet2DConditionModel,
103
- scheduler: Union[DDIMScheduler, DDPMScheduler],
104
- movq: VQModel,
105
- ):
106
- super().__init__()
107
-
108
- self.register_modules(
109
- text_encoder=text_encoder,
110
- tokenizer=tokenizer,
111
- unet=unet,
112
- scheduler=scheduler,
113
- movq=movq,
114
- )
115
- self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
116
-
117
- # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
118
- def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
119
- if latents is None:
120
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
121
- else:
122
- if latents.shape != shape:
123
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
124
- latents = latents.to(device)
125
-
126
- latents = latents * scheduler.init_noise_sigma
127
- return latents
128
-
129
- def _encode_prompt(
130
- self,
131
- prompt,
132
- device,
133
- num_images_per_prompt,
134
- do_classifier_free_guidance,
135
- negative_prompt=None,
136
- ):
137
- batch_size = len(prompt) if isinstance(prompt, list) else 1
138
- # get prompt text embeddings
139
- text_inputs = self.tokenizer(
140
- prompt,
141
- padding="max_length",
142
- truncation=True,
143
- max_length=77,
144
- return_attention_mask=True,
145
- add_special_tokens=True,
146
- return_tensors="pt",
147
- )
148
-
149
- text_input_ids = text_inputs.input_ids
150
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
151
-
152
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
153
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
154
- logger.warning(
155
- "The following part of your input was truncated because CLIP can only handle sequences up to"
156
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
157
- )
158
-
159
- text_input_ids = text_input_ids.to(device)
160
- text_mask = text_inputs.attention_mask.to(device)
161
-
162
- prompt_embeds, text_encoder_hidden_states = self.text_encoder(
163
- input_ids=text_input_ids, attention_mask=text_mask
164
- )
165
-
166
- prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
167
- text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
168
- text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
169
-
170
- if do_classifier_free_guidance:
171
- uncond_tokens: List[str]
172
- if negative_prompt is None:
173
- uncond_tokens = [""] * batch_size
174
- elif type(prompt) is not type(negative_prompt):
175
- raise TypeError(
176
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
177
- f" {type(prompt)}."
178
- )
179
- elif isinstance(negative_prompt, str):
180
- uncond_tokens = [negative_prompt]
181
- elif batch_size != len(negative_prompt):
182
- raise ValueError(
183
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
184
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
185
- " the batch size of `prompt`."
186
- )
187
- else:
188
- uncond_tokens = negative_prompt
189
-
190
- uncond_input = self.tokenizer(
191
- uncond_tokens,
192
- padding="max_length",
193
- max_length=77,
194
- truncation=True,
195
- return_attention_mask=True,
196
- add_special_tokens=True,
197
- return_tensors="pt",
198
- )
199
- uncond_text_input_ids = uncond_input.input_ids.to(device)
200
- uncond_text_mask = uncond_input.attention_mask.to(device)
201
-
202
- negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
203
- input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
204
- )
205
-
206
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
207
-
208
- seq_len = negative_prompt_embeds.shape[1]
209
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
210
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
211
-
212
- seq_len = uncond_text_encoder_hidden_states.shape[1]
213
- uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
214
- uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
215
- batch_size * num_images_per_prompt, seq_len, -1
216
- )
217
- uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
218
-
219
- # done duplicates
220
-
221
- # For classifier free guidance, we need to do two forward passes.
222
- # Here we concatenate the unconditional and text embeddings into a single batch
223
- # to avoid doing two forward passes
224
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
225
- text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
226
-
227
- text_mask = torch.cat([uncond_text_mask, text_mask])
228
-
229
- return prompt_embeds, text_encoder_hidden_states, text_mask
230
-
231
- def enable_model_cpu_offload(self, gpu_id=0):
232
- r"""
233
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
234
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
235
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
236
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
237
- """
238
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
239
- from accelerate import cpu_offload_with_hook
240
- else:
241
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
242
-
243
- device = torch.device(f"cuda:{gpu_id}")
244
-
245
- if self.device.type != "cpu":
246
- self.to("cpu", silence_dtype_warnings=True)
247
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
248
-
249
- hook = None
250
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
251
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
252
-
253
- # We'll offload the last model manually.
254
- self.final_offload_hook = hook
255
-
256
- @torch.no_grad()
257
- @replace_example_docstring(EXAMPLE_DOC_STRING)
258
- def __call__(
259
- self,
260
- prompt: Union[str, List[str]],
261
- image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
262
- negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
263
- negative_prompt: Optional[Union[str, List[str]]] = None,
264
- height: int = 512,
265
- width: int = 512,
266
- num_inference_steps: int = 100,
267
- guidance_scale: float = 4.0,
268
- num_images_per_prompt: int = 1,
269
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
270
- latents: Optional[torch.FloatTensor] = None,
271
- output_type: Optional[str] = "pil",
272
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
273
- callback_steps: int = 1,
274
- return_dict: bool = True,
275
- ):
276
- """
277
- Function invoked when calling the pipeline for generation.
278
-
279
- Args:
280
- prompt (`str` or `List[str]`):
281
- The prompt or prompts to guide the image generation.
282
- image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
283
- The CLIP image embeddings for the text prompt, which will be used to condition the image generation.
284
- negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
285
- The CLIP image embeddings for the negative text prompt, which will be used to condition the image generation.
286
- negative_prompt (`str` or `List[str]`, *optional*):
287
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
288
- if `guidance_scale` is less than `1`).
289
- height (`int`, *optional*, defaults to 512):
290
- The height in pixels of the generated image.
291
- width (`int`, *optional*, defaults to 512):
292
- The width in pixels of the generated image.
293
- num_inference_steps (`int`, *optional*, defaults to 100):
294
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
295
- expense of slower inference.
296
- guidance_scale (`float`, *optional*, defaults to 4.0):
297
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
298
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
299
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
300
- 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
301
- usually at the expense of lower image quality.
302
- num_images_per_prompt (`int`, *optional*, defaults to 1):
303
- The number of images to generate per prompt.
304
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
305
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
306
- to make generation deterministic.
307
- latents (`torch.FloatTensor`, *optional*):
308
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
309
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
310
- tensor will be generated by sampling using the supplied random `generator`.
311
- output_type (`str`, *optional*, defaults to `"pil"`):
312
- The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
313
- (`np.array`) or `"pt"` (`torch.Tensor`).
314
- callback (`Callable`, *optional*):
315
- A function that will be called every `callback_steps` steps during inference. The function is called with the
316
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
317
- callback_steps (`int`, *optional*, defaults to 1):
318
- The frequency at which the `callback` function is called. If not specified, the callback is called at
319
- every step.
320
- return_dict (`bool`, *optional*, defaults to `True`):
321
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
322
-
323
- Examples:
324
-
325
- Returns:
326
- [`~pipelines.ImagePipelineOutput`] or `tuple`
327
- """
328
-
329
- if isinstance(prompt, str):
330
- batch_size = 1
331
- elif isinstance(prompt, list):
332
- batch_size = len(prompt)
333
- else:
334
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
335
-
336
- device = self._execution_device
337
-
338
- batch_size = batch_size * num_images_per_prompt
339
- do_classifier_free_guidance = guidance_scale > 1.0
340
-
341
- prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
342
- prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
343
- )
344
-
345
- if isinstance(image_embeds, list):
346
- image_embeds = torch.cat(image_embeds, dim=0)
347
- if isinstance(negative_image_embeds, list):
348
- negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
349
-
350
- if do_classifier_free_guidance:
351
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
352
- negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
353
-
354
- image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
355
- dtype=prompt_embeds.dtype, device=device
356
- )
357
-
358
- self.scheduler.set_timesteps(num_inference_steps, device=device)
359
- timesteps_tensor = self.scheduler.timesteps
360
-
361
- num_channels_latents = self.unet.config.in_channels
362
-
363
- height, width = get_new_h_w(height, width, self.movq_scale_factor)
364
-
365
- # create initial latent
366
- latents = self.prepare_latents(
367
- (batch_size, num_channels_latents, height, width),
368
- text_encoder_hidden_states.dtype,
369
- device,
370
- generator,
371
- latents,
372
- self.scheduler,
373
- )
374
-
375
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
376
- # expand the latents if we are doing classifier free guidance
377
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
378
-
379
- added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
380
- noise_pred = self.unet(
381
- sample=latent_model_input,
382
- timestep=t,
383
- encoder_hidden_states=text_encoder_hidden_states,
384
- added_cond_kwargs=added_cond_kwargs,
385
- return_dict=False,
386
- )[0]
387
-
388
- if do_classifier_free_guidance:
389
- noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
390
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
391
- _, variance_pred_text = variance_pred.chunk(2)
392
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
393
- noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
394
-
395
- if not (
396
- hasattr(self.scheduler.config, "variance_type")
397
- and self.scheduler.config.variance_type in ["learned", "learned_range"]
398
- ):
399
- noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
400
-
401
- # compute the previous noisy sample x_t -> x_t-1
402
- latents = self.scheduler.step(
403
- noise_pred,
404
- t,
405
- latents,
406
- generator=generator,
407
- ).prev_sample
408
-
409
- if callback is not None and i % callback_steps == 0:
410
- callback(i, t, latents)
411
-
412
- # post-processing
413
- image = self.movq.decode(latents, force_not_quantize=True)["sample"]
414
-
415
- if output_type not in ["pt", "np", "pil"]:
416
- raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
417
-
418
- if output_type in ["np", "pil"]:
419
- image = image * 0.5 + 0.5
420
- image = image.clamp(0, 1)
421
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
422
-
423
- if output_type == "pil":
424
- image = self.numpy_to_pil(image)
425
-
426
- if not return_dict:
427
- return (image,)
428
-
429
- return ImagePipelineOutput(images=image)
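For reference, the `get_new_h_w` helper near the top of this deleted pipeline returns *latent* dimensions, not pixel dimensions: it divides the requested size by `scale_factor**2` (64 for the default MoVQ factor of 8), rounds up, then multiplies by `scale_factor`. A minimal equivalent sketch, with illustrative values of my own:

```python
# Equivalent restatement of get_new_h_w from the deleted pipeline above.
# Maps a requested pixel size to the latent grid size (pixels / 8),
# rounded up so the effective pixel size is a multiple of 64.
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2 + (h % scale_factor**2 != 0)
    new_w = w // scale_factor**2 + (w % scale_factor**2 != 0)
    return new_h * scale_factor, new_w * scale_factor

assert get_new_h_w(768, 768) == (96, 96)  # 768 px -> 96 latent cells
assert get_new_h_w(500, 500) == (64, 64)  # rounded up to 512 px worth of latents
```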
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py DELETED
@@ -1,290 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import random
18
- import unittest
19
-
20
- import numpy as np
21
- import torch
22
- from PIL import Image
23
-
24
- from diffusers import (
25
- DDIMScheduler,
26
- KandinskyV22ControlnetImg2ImgPipeline,
27
- KandinskyV22PriorEmb2EmbPipeline,
28
- UNet2DConditionModel,
29
- VQModel,
30
- )
31
- from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
32
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
33
-
34
- from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
35
-
36
-
37
- enable_full_determinism()
38
-
39
-
40
- class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
41
- pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
42
- params = ["image_embeds", "negative_image_embeds", "image", "hint"]
43
- batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
44
- required_optional_params = [
45
- "generator",
46
- "height",
47
- "width",
48
- "strength",
49
- "guidance_scale",
50
- "num_inference_steps",
51
- "return_dict",
52
- "guidance_scale",
53
- "num_images_per_prompt",
54
- "output_type",
55
- "return_dict",
56
- ]
57
- test_xformers_attention = False
58
-
59
- @property
60
- def text_embedder_hidden_size(self):
61
- return 32
62
-
63
- @property
64
- def time_input_dim(self):
65
- return 32
66
-
67
- @property
68
- def block_out_channels_0(self):
69
- return self.time_input_dim
70
-
71
- @property
72
- def time_embed_dim(self):
73
- return self.time_input_dim * 4
74
-
75
- @property
76
- def cross_attention_dim(self):
77
- return 100
78
-
79
- @property
80
- def dummy_unet(self):
81
- torch.manual_seed(0)
82
-
83
- model_kwargs = {
84
- "in_channels": 8,
85
- # Out channels is double the in channels because the model predicts both mean and variance
86
- "out_channels": 8,
87
- "addition_embed_type": "image_hint",
88
- "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
89
- "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
90
- "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
91
- "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
92
- "layers_per_block": 1,
93
- "encoder_hid_dim": self.text_embedder_hidden_size,
94
- "encoder_hid_dim_type": "image_proj",
95
- "cross_attention_dim": self.cross_attention_dim,
96
- "attention_head_dim": 4,
97
- "resnet_time_scale_shift": "scale_shift",
98
- "class_embed_type": None,
99
- }
100
-
101
- model = UNet2DConditionModel(**model_kwargs)
102
- return model
103
-
104
- @property
105
- def dummy_movq_kwargs(self):
106
- return {
107
- "block_out_channels": [32, 32, 64, 64],
108
- "down_block_types": [
109
- "DownEncoderBlock2D",
110
- "DownEncoderBlock2D",
111
- "DownEncoderBlock2D",
112
- "AttnDownEncoderBlock2D",
113
- ],
114
- "in_channels": 3,
115
- "latent_channels": 4,
116
- "layers_per_block": 1,
117
- "norm_num_groups": 8,
118
- "norm_type": "spatial",
119
- "num_vq_embeddings": 12,
120
- "out_channels": 3,
121
- "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
122
- "vq_embed_dim": 4,
123
- }
124
-
125
- @property
126
- def dummy_movq(self):
127
- torch.manual_seed(0)
128
- model = VQModel(**self.dummy_movq_kwargs)
129
- return model
130
-
131
- def get_dummy_components(self):
132
- unet = self.dummy_unet
133
- movq = self.dummy_movq
134
-
135
- ddim_config = {
136
- "num_train_timesteps": 1000,
137
- "beta_schedule": "linear",
138
- "beta_start": 0.00085,
139
- "beta_end": 0.012,
140
- "clip_sample": False,
141
- "set_alpha_to_one": False,
142
- "steps_offset": 0,
143
- "prediction_type": "epsilon",
144
- "thresholding": False,
145
- }
146
-
147
- scheduler = DDIMScheduler(**ddim_config)
148
-
149
- components = {
150
- "unet": unet,
151
- "scheduler": scheduler,
152
- "movq": movq,
153
- }
154
-
155
- return components
156
-
157
- def get_dummy_inputs(self, device, seed=0):
158
- image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
159
- negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
160
- device
161
- )
162
- # create init_image
163
- image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
164
- image = image.cpu().permute(0, 2, 3, 1)[0]
165
- init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
166
- # create hint
167
- hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
168
-
169
- if str(device).startswith("mps"):
170
- generator = torch.manual_seed(seed)
171
- else:
172
- generator = torch.Generator(device=device).manual_seed(seed)
173
- inputs = {
174
- "image": init_image,
175
- "image_embeds": image_embeds,
176
- "negative_image_embeds": negative_image_embeds,
177
- "hint": hint,
178
- "generator": generator,
179
- "height": 64,
180
- "width": 64,
181
- "num_inference_steps": 10,
182
- "guidance_scale": 7.0,
183
- "strength": 0.2,
184
- "output_type": "np",
185
- }
186
- return inputs
187
-
188
- def test_kandinsky_controlnet_img2img(self):
189
- device = "cpu"
190
-
191
- components = self.get_dummy_components()
192
-
193
- pipe = self.pipeline_class(**components)
194
- pipe = pipe.to(device)
195
-
196
- pipe.set_progress_bar_config(disable=None)
197
-
198
- output = pipe(**self.get_dummy_inputs(device))
199
- image = output.images
200
-
201
- image_from_tuple = pipe(
202
- **self.get_dummy_inputs(device),
203
- return_dict=False,
204
- )[0]
205
-
206
- image_slice = image[0, -3:, -3:, -1]
207
- image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
208
-
209
- assert image.shape == (1, 64, 64, 3)
210
-
211
- expected_slice = np.array(
212
- [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
213
- )
214
- assert (
215
- np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
216
- ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
217
- assert (
218
- np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
219
- ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
220
-
221
-
222
- @slow
223
- @require_torch_gpu
224
- class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
225
- def tearDown(self):
226
- # clean up the VRAM after each test
227
- super().tearDown()
228
- gc.collect()
229
- torch.cuda.empty_cache()
230
-
231
- def test_kandinsky_controlnet_img2img(self):
232
- expected_image = load_numpy(
233
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
234
- "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
235
- )
236
-
237
- init_image = load_image(
238
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
239
- )
240
- init_image = init_image.resize((512, 512))
241
-
242
- hint = load_image(
243
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
244
- "/kandinskyv22/hint_image_cat.png"
245
- )
246
- hint = torch.from_numpy(np.array(hint)).float() / 255.0
247
- hint = hint.permute(2, 0, 1).unsqueeze(0)
248
-
249
- prompt = "A robot, 4k photo"
250
-
251
- pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
252
- "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
253
- )
254
- pipe_prior.to(torch_device)
255
-
256
- pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
257
- "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
258
- )
259
- pipeline = pipeline.to(torch_device)
260
-
261
- pipeline.set_progress_bar_config(disable=None)
262
-
263
- generator = torch.Generator(device="cpu").manual_seed(0)
264
-
265
- image_emb, zero_image_emb = pipe_prior(
266
- prompt,
267
- image=init_image,
268
- strength=0.85,
269
- generator=generator,
270
- negative_prompt="",
271
- ).to_tuple()
272
-
273
- output = pipeline(
274
- image=init_image,
275
- image_embeds=image_emb,
276
- negative_image_embeds=zero_image_emb,
277
- hint=hint,
278
- generator=generator,
279
- num_inference_steps=100,
280
- height=512,
281
- width=512,
282
- strength=0.5,
283
- output_type="np",
284
- )
285
-
286
- image = output.images[0]
287
-
288
- assert image.shape == (512, 512, 3)
289
-
290
- assert_mean_pixel_difference(image, expected_image)
spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_r101_fpn_1x_coco.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './retinanet_r50_fpn_1x_coco.py'
2
- model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './pspnet_r50-d8_480x480_40k_pascal_context.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/README.md DELETED
@@ -1,83 +0,0 @@
1
- # Multimodal
2
-
3
- ## Description
4
-
5
- Adds support for multimodality (text+images) to text-generation-webui.
6
-
7
- https://user-images.githubusercontent.com/3718215/233817203-69b57e77-0c55-4fd6-b742-3204bb13b8fc.mp4
8
-
9
- ## Usage
10
-
11
- To run this extension, download a LLM that supports multimodality, and then start server.py with the appropriate `--multimodal-pipeline` argument. Examples:
12
-
13
- ```
14
- python server.py --model wojtab_llava-7b-v0-4bit-128g --multimodal-pipeline llava-7b
15
- python3 server.py --model wojtab_llava-13b-v0-4bit-128g --multimodal-pipeline llava-13b
16
- python server.py --model anon8231489123_vicuna-13b-GPTQ-4bit-128g --multimodal-pipeline minigpt4-13b
17
- python server.py --model llama-7b-4bit --multimodal-pipeline minigpt4-7b
18
- ```
19
-
20
- There is built-in support for LLaVA-v0-13B and LLaVA-v0-7b. To install `minigpt4`:
21
-
22
- - clone https://github.com/Wojtab/minigpt-4-pipeline into `extensions/multimodal/pipelines`
23
- - install the requirements.txt
24
-
25
- The same procedure should be used to install other pipelines, which can then be used with `--multimodal-pipeline [pipeline name]`. For additional multimodal pipelines refer to the compatibility section below.
26
-
27
- Do note, that each image takes up a considerable amount of tokens, so adjust `max_new_tokens` to be at most 1700 (recommended value is between 200 to 500), so the images don't get truncated.
28
-
29
- To send an image, just upload it to the extension field below chat, and send a prompt as always. The image will be added to the end of your message. If you wish to modify the placement, include a string `<image>` in your prompt.
30
-
31
- Additionally, there is *Embed all images, not only the last one* checkbox. It modifies the image embeddings, by default (if it's unchecked), all but the most recent images have their embeddings empty, so they are not fed to the network. It seems as if some multimodal networks consider the features in all images at the same time as if they were a single image. Due to this behavior, by default, the extension skips previous images. However, it can lead to sub-par generation on other pipelines. If you want to include all images, just tick this checkbox.
32
-
33
- ## Compatibility
34
- As of now, the following multimodal pipelines are supported:
35
- |Pipeline|`--multimodal-pipeline`|Default LLM|LLM info(for the linked model)|Pipeline repository|
36
- |-|-|-|-|-|
37
- |[LLaVA 13B](https://github.com/haotian-liu/LLaVA)|`llava-13b`|[LLaVA 13B](https://huggingface.co/wojtab/llava-13b-v0-4bit-128g)|GPTQ 4-bit quant, old CUDA|built-in|
38
- |[LLaVA 7B](https://github.com/haotian-liu/LLaVA)|`llava-7b`|[LLaVA 7B](https://huggingface.co/wojtab/llava-7b-v0-4bit-128g)|GPTQ 4-bit quant, old CUDA|built-in|
39
- |[MiniGPT-4 7B](https://github.com/Vision-CAIR/MiniGPT-4)|`minigpt4-7b`|[Vicuna v0 7B](https://huggingface.co/TheBloke/vicuna-7B-GPTQ-4bit-128g)|GPTQ 4-bit quant, new format|[Wojtab/minigpt-4-pipeline](https://github.com/Wojtab/minigpt-4-pipeline)|
40
- |[MiniGPT-4 13B](https://github.com/Vision-CAIR/MiniGPT-4)|`minigpt4-13b`|[Vicuna v0 13B](https://huggingface.co/anon8231489123/vicuna-13b-GPTQ-4bit-128g)|GPTQ 4-bit quant, old CUDA|[Wojtab/minigpt-4-pipeline](https://github.com/Wojtab/minigpt-4-pipeline)|
41
- |[InstructBLIP 7B](https://github.com/salesforce/LAVIS/tree/main/projects/instructblip)|`instructblip-7b`|[Vicuna v1.1 7B](https://huggingface.co/TheBloke/vicuna-7B-1.1-GPTQ-4bit-128g)|GPTQ 4-bit quant|[kjerk/instructblip-pipeline](https://github.com/kjerk/instructblip-pipeline)|
42
- |[InstructBLIP 13B](https://github.com/salesforce/LAVIS/tree/main/projects/instructblip)|`instructblip-13b`|[Vicuna v1.1 13B](https://huggingface.co/TheBloke/vicuna-13B-1.1-GPTQ-4bit-128g)|GPTQ 4-bit quant|[kjerk/instructblip-pipeline](https://github.com/kjerk/instructblip-pipeline)|
43
-
44
- Some pipelines could support different LLMs but do note that while it might work, it isn't a supported configuration.
45
-
46
- DO NOT report bugs if you are using a different LLM.
47
-
48
- DO NOT report bugs with pipelines in this repository (unless they are built-in)
49
-
50
- ## Extension config
51
- This extension uses the following parameters (from `settings.json`):
52
- |Parameter|Description|
53
- |---------|-----------|
54
- |`multimodal-vision_bits`|Number of bits to load vision models (CLIP/ViT) feature extractor in (most pipelines should support either 32 or 16, default=32)|
55
- |`multimodal-vision_device`|Torch device to run the feature extractor on, for example, `cpu` or `cuda:0`, by default `cuda:0` if available|
56
- |`multimodal-projector_bits`|Number of bits to load feature projector model(s) in (most pipelines should support either 32 or 16, default=32)|
57
- |`multimodal-projector_device`|Torch device to run the feature projector model(s) on, for example `cpu` or `cuda:0`, by default `cuda:0` if available|
58
- |`multimodal-add_all_images_to_prompt`|Default value of "Embed all images, not only the last one" checkbox|
59
-
60
- ## Usage through API
61
-
62
- You can run the multimodal inference through API, by inputting the images to prompt. Images are embedded like so: `f'<img src="data:image/jpeg;base64,{img_str}">'`, where `img_str` is base-64 jpeg data. Note that you will need to launch `server.py` with the arguments `--api --extensions multimodal`.
63
-
64
- Python example:
65
-
66
- ```Python
67
- import base64
68
- import requests
69
-
70
- CONTEXT = "You are LLaVA, a large language and vision assistant trained by UW Madison WAIV Lab. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language. Follow the instructions carefully and explain your answers in detail.### Human: Hi!### Assistant: Hi there! How can I help you today?\n"
71
-
72
- with open('extreme_ironing.jpg', 'rb') as f:
73
- img_str = base64.b64encode(f.read()).decode('utf-8')
74
- prompt = CONTEXT + f'### Human: What is unusual about this image: \n<img src="data:image/jpeg;base64,{img_str}">### Assistant: '
75
- print(requests.post('http://127.0.0.1:5000/api/v1/generate', json={'prompt': prompt, 'stopping_strings': ['\n###']}).json())
76
- ```
77
- script output:
78
- ```Python
79
- {'results': [{'text': "The unusual aspect of this image is that a man is standing on top of a yellow minivan while doing his laundry. He has set up a makeshift clothes line using the car's rooftop as an outdoor drying area. This scene is uncommon because people typically do their laundry indoors, in a dedicated space like a laundromat or a room in their home, rather than on top of a moving vehicle. Additionally, hanging clothes on the car could be potentially hazardous or illegal in some jurisdictions due to the risk of damaging the vehicle or causing accidents on the road.\n##"}]}
80
- ```
81
-
82
- ## For pipeline developers/technical description
83
- see [DOCS.md](https://github.com/oobabooga/text-generation-webui/blob/main/extensions/multimodal/DOCS.md)
spaces/AnnasBlackHat/Image-Downloader/gofile.py DELETED
@@ -1,25 +0,0 @@
1
- import requests
2
-
3
- class Gofile:
4
- def __init__(self, token = None, folder_id= None):
5
- self.token = token
6
- self.folder_id = folder_id
7
-
8
- def find_server(self):
9
- resp = requests.get('https://api.gofile.io/getServer')
10
- result = resp.json()
11
- return result['data']['server']
12
-
13
- def upload(self, files):
14
- server = self.find_server()
15
- url = f'https://{server}.gofile.io/uploadFile'
16
- data_payload = {'token': self.token, 'folderId': self.folder_id}
17
- download_link = []
18
- for file in files:
19
- with open(file, 'rb') as f:
20
- resp = requests.post(url, files = {'file': f}, data= data_payload)
21
- print('upload status: ', resp.status_code)
22
- download_page = resp.json()['data']['downloadPage']
23
- download_link.append(download_page)
24
- print('download page: ',download_page)
25
- return download_link
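A hypothetical usage of the class above; the token and folder id are placeholders, not real credentials:

```python
# Placeholder credentials; obtain real values from your gofile.io account.
uploader = Gofile(token="YOUR_GOFILE_TOKEN", folder_id="YOUR_FOLDER_ID")
links = uploader.upload(["image_001.jpg", "image_002.jpg"])
for link in links:
    print("uploaded:", link)
```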
spaces/Anonymous-sub/Rerender/ControlNet/annotator/openpose/util.py DELETED
@@ -1,164 +0,0 @@
1
- import math
2
- import numpy as np
3
- import matplotlib
4
- import cv2
5
-
6
-
7
- def padRightDownCorner(img, stride, padValue):
8
- h = img.shape[0]
9
- w = img.shape[1]
10
-
11
- pad = 4 * [None]
12
- pad[0] = 0 # up
13
- pad[1] = 0 # left
14
- pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
15
- pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
16
-
17
- img_padded = img
18
- pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
19
- img_padded = np.concatenate((pad_up, img_padded), axis=0)
20
- pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
21
- img_padded = np.concatenate((pad_left, img_padded), axis=1)
22
- pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
23
- img_padded = np.concatenate((img_padded, pad_down), axis=0)
24
- pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
25
- img_padded = np.concatenate((img_padded, pad_right), axis=1)
26
-
27
- return img_padded, pad
28
-
29
- # transfer caffe model to pytorch which will match the layer name
30
- def transfer(model, model_weights):
31
- transfered_model_weights = {}
32
- for weights_name in model.state_dict().keys():
33
- transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
34
- return transfered_model_weights
35
-
36
- # draw the body keypoint and lims
37
- def draw_bodypose(canvas, candidate, subset):
38
- stickwidth = 4
39
- limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
40
- [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
41
- [1, 16], [16, 18], [3, 17], [6, 18]]
42
-
43
- colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
44
- [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
45
- [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
46
- for i in range(18):
47
- for n in range(len(subset)):
48
- index = int(subset[n][i])
49
- if index == -1:
50
- continue
51
- x, y = candidate[index][0:2]
52
- cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
53
- for i in range(17):
54
- for n in range(len(subset)):
55
- index = subset[n][np.array(limbSeq[i]) - 1]
56
- if -1 in index:
57
- continue
58
- cur_canvas = canvas.copy()
59
- Y = candidate[index.astype(int), 0]
60
- X = candidate[index.astype(int), 1]
61
- mX = np.mean(X)
62
- mY = np.mean(Y)
63
- length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
64
- angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
65
- polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
66
- cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
67
- canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
68
- # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
69
- # plt.imshow(canvas[:, :, [2, 1, 0]])
70
- return canvas
71
-
72
-
73
- # image drawed by opencv is not good.
74
- def draw_handpose(canvas, all_hand_peaks, show_number=False):
75
- edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
76
- [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
77
-
78
- for peaks in all_hand_peaks:
79
- for ie, e in enumerate(edges):
80
- if np.sum(np.all(peaks[e], axis=1)==0)==0:
81
- x1, y1 = peaks[e[0]]
82
- x2, y2 = peaks[e[1]]
83
- cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2)
84
-
85
- for i, keyponit in enumerate(peaks):
86
- x, y = keyponit
87
- cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
88
- if show_number:
89
- cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
90
- return canvas
91
-
92
- # detect hand according to body pose keypoints
93
- # please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
94
- def handDetect(candidate, subset, oriImg):
95
- # right hand: wrist 4, elbow 3, shoulder 2
96
- # left hand: wrist 7, elbow 6, shoulder 5
97
- ratioWristElbow = 0.33
98
- detect_result = []
99
- image_height, image_width = oriImg.shape[0:2]
100
- for person in subset.astype(int):
101
- # if any of three not detected
102
- has_left = np.sum(person[[5, 6, 7]] == -1) == 0
103
- has_right = np.sum(person[[2, 3, 4]] == -1) == 0
104
- if not (has_left or has_right):
105
- continue
106
- hands = []
107
- #left hand
108
- if has_left:
109
- left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
110
- x1, y1 = candidate[left_shoulder_index][:2]
111
- x2, y2 = candidate[left_elbow_index][:2]
112
- x3, y3 = candidate[left_wrist_index][:2]
113
- hands.append([x1, y1, x2, y2, x3, y3, True])
114
- # right hand
115
- if has_right:
116
- right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
117
- x1, y1 = candidate[right_shoulder_index][:2]
118
- x2, y2 = candidate[right_elbow_index][:2]
119
- x3, y3 = candidate[right_wrist_index][:2]
120
- hands.append([x1, y1, x2, y2, x3, y3, False])
121
-
122
- for x1, y1, x2, y2, x3, y3, is_left in hands:
123
- # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox
124
- # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
125
- # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
126
- # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
127
- # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
128
- # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
129
- x = x3 + ratioWristElbow * (x3 - x2)
130
- y = y3 + ratioWristElbow * (y3 - y2)
131
- distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
132
- distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
133
- width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
134
- # x-y refers to the center --> offset to topLeft point
135
- # handRectangle.x -= handRectangle.width / 2.f;
136
- # handRectangle.y -= handRectangle.height / 2.f;
137
- x -= width / 2
138
- y -= width / 2 # width = height
139
- # overflow the image
140
- if x < 0: x = 0
141
- if y < 0: y = 0
142
- width1 = width
143
- width2 = width
144
- if x + width > image_width: width1 = image_width - x
145
- if y + width > image_height: width2 = image_height - y
146
- width = min(width1, width2)
147
- # the max hand box value is 20 pixels
148
- if width >= 20:
149
- detect_result.append([int(x), int(y), int(width), is_left])
150
-
151
- '''
152
- return value: [[x, y, w, True if left hand else False]].
153
- width=height since the network require squared input.
154
- x, y is the coordinate of top left
155
- '''
156
- return detect_result
157
-
158
- # get max index of 2d array
159
- def npmax(array):
160
- arrayindex = array.argmax(1)
161
- arrayvalue = array.max(1)
162
- i = arrayvalue.argmax()
163
- j = arrayindex[i]
164
- return i, j
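As a quick sanity check of `padRightDownCorner` above (with the function in scope): only the bottom and right edges are padded, up to the next multiple of `stride`. The input sizes below are my own illustration:

```python
import numpy as np

# 100x130 input with stride 8: bottom pads by 4 (100 -> 104), right by 6 (130 -> 136).
img = np.zeros((100, 130, 3), dtype=np.uint8)
img_padded, pad = padRightDownCorner(img, stride=8, padValue=128)
print(img_padded.shape)  # (104, 136, 3)
print(pad)               # [0, 0, 4, 6]
```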
spaces/Apex-X/Tm/roop/processors/frame/core.py DELETED
@@ -1,88 +0,0 @@
1
- import os
2
- import importlib
3
- import psutil
4
- from concurrent.futures import ThreadPoolExecutor, as_completed
5
- from queue import Queue
6
- from types import ModuleType
7
- from typing import Any, List, Callable
8
- from tqdm import tqdm
9
-
10
- import roop
11
-
12
- FRAME_PROCESSORS_MODULES: List[ModuleType] = []
13
- FRAME_PROCESSORS_INTERFACE = [
14
- 'pre_check',
15
- 'pre_start',
16
- 'process_frame',
17
- 'process_frames',
18
- 'process_image',
19
- 'process_video',
20
- 'post_process'
21
- ]
22
-
23
-
24
- def load_frame_processor_module(frame_processor: str) -> Any:
25
- try:
26
- frame_processor_module = importlib.import_module(f'roop.processors.frame.{frame_processor}')
27
- for method_name in FRAME_PROCESSORS_INTERFACE:
28
- if not hasattr(frame_processor_module, method_name):
29
- raise NotImplementedError
30
- except (ImportError, NotImplementedError):
31
- quit(f'Frame processor {frame_processor} crashed.')
32
- return frame_processor_module
33
-
34
-
35
- def get_frame_processors_modules(frame_processors: List[str]) -> List[ModuleType]:
36
- global FRAME_PROCESSORS_MODULES
37
-
38
- if not FRAME_PROCESSORS_MODULES:
39
- for frame_processor in frame_processors:
40
- frame_processor_module = load_frame_processor_module(frame_processor)
41
- FRAME_PROCESSORS_MODULES.append(frame_processor_module)
42
- return FRAME_PROCESSORS_MODULES
43
-
44
-
45
- def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None:
46
- with ThreadPoolExecutor(max_workers=roop.globals.execution_threads) as executor:
47
- futures = []
48
- queue = create_queue(temp_frame_paths)
49
- queue_per_future = len(temp_frame_paths) // roop.globals.execution_threads
50
- while not queue.empty():
51
- future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update)
52
- futures.append(future)
53
- for future in as_completed(futures):
54
- future.result()
55
-
56
-
57
- def create_queue(temp_frame_paths: List[str]) -> Queue[str]:
58
- queue: Queue[str] = Queue()
59
- for frame_path in temp_frame_paths:
60
- queue.put(frame_path)
61
- return queue
62
-
63
-
64
- def pick_queue(queue: Queue[str], queue_per_future: int) -> List[str]:
65
- queues = []
66
- for _ in range(queue_per_future):
67
- if not queue.empty():
68
- queues.append(queue.get())
69
- return queues
70
-
71
-
72
- def process_video(source_path: str, frame_paths: list[str], process_frames: Callable[[str, List[str], Any], None]) -> None:
73
- progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
74
- total = len(frame_paths)
75
- with tqdm(total=total, desc='Processing', unit='frame', dynamic_ncols=True, bar_format=progress_bar_format) as progress:
76
- multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress))
77
-
78
-
79
- def update_progress(progress: Any = None) -> None:
80
- process = psutil.Process(os.getpid())
81
- memory_usage = process.memory_info().rss / 1024 / 1024 / 1024
82
- progress.set_postfix({
83
- 'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
84
- 'execution_providers': roop.globals.execution_providers,
85
- 'execution_threads': roop.globals.execution_threads
86
- })
87
- progress.refresh()
88
- progress.update(1)
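The chunking logic above hands each worker future a contiguous slice of the frame queue. A tiny illustration of `create_queue`/`pick_queue` in isolation (frame names are made up):

```python
frames = [f"frame_{i:04d}.png" for i in range(10)]
queue = create_queue(frames)
first_chunk = pick_queue(queue, queue_per_future=4)
print(first_chunk)  # ['frame_0000.png', 'frame_0001.png', 'frame_0002.png', 'frame_0003.png']
```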
spaces/Apex-X/nono/roop/face_reference.py DELETED
@@ -1,21 +0,0 @@
1
- from typing import Optional
2
-
3
- from roop.typing import Face
4
-
5
- FACE_REFERENCE = None
6
-
7
-
8
- def get_face_reference() -> Optional[Face]:
9
- return FACE_REFERENCE
10
-
11
-
12
- def set_face_reference(face: Face) -> None:
13
- global FACE_REFERENCE
14
-
15
- FACE_REFERENCE = face
16
-
17
-
18
- def clear_face_reference() -> None:
19
- global FACE_REFERENCE
20
-
21
- FACE_REFERENCE = None
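The module above is a process-wide singleton. A minimal demonstration, assuming it is importable as `roop.face_reference` (a string stands in for a `Face` object here, purely for illustration):

```python
from roop.face_reference import clear_face_reference, get_face_reference, set_face_reference

set_face_reference("dummy-face")  # real callers pass an insightface Face
assert get_face_reference() == "dummy-face"
clear_face_reference()
assert get_face_reference() is None
```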
spaces/Arnaudding001/OpenAI_whisperLive/utils.py DELETED
@@ -1,115 +0,0 @@
1
- import textwrap
2
- import unicodedata
3
- import re
4
-
5
- import zlib
6
- from typing import Iterator, TextIO
7
-
8
-
9
- def exact_div(x, y):
10
- assert x % y == 0
11
- return x // y
12
-
13
-
14
- def str2bool(string):
15
- str2val = {"True": True, "False": False}
16
- if string in str2val:
17
- return str2val[string]
18
- else:
19
- raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
20
-
21
-
22
- def optional_int(string):
23
- return None if string == "None" else int(string)
24
-
25
-
26
- def optional_float(string):
27
- return None if string == "None" else float(string)
28
-
29
-
30
- def compression_ratio(text) -> float:
31
- return len(text) / len(zlib.compress(text.encode("utf-8")))
32
-
33
-
34
- def format_timestamp(seconds: float, always_include_hours: bool = False, fractionalSeperator: str = '.'):
35
- assert seconds >= 0, "non-negative timestamp expected"
36
- milliseconds = round(seconds * 1000.0)
37
-
38
- hours = milliseconds // 3_600_000
39
- milliseconds -= hours * 3_600_000
40
-
41
- minutes = milliseconds // 60_000
42
- milliseconds -= minutes * 60_000
43
-
44
- seconds = milliseconds // 1_000
45
- milliseconds -= seconds * 1_000
46
-
47
- hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
48
- return f"{hours_marker}{minutes:02d}:{seconds:02d}{fractionalSeperator}{milliseconds:03d}"
49
-
50
-
51
- def write_txt(transcript: Iterator[dict], file: TextIO):
52
- for segment in transcript:
53
- print(segment['text'].strip(), file=file, flush=True)
54
-
55
-
56
- def write_vtt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None):
57
- print("WEBVTT\n", file=file)
58
- for segment in transcript:
59
- text = process_text(segment['text'], maxLineWidth).replace('-->', '->')
60
-
61
- print(
62
- f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
63
- f"{text}\n",
64
- file=file,
65
- flush=True,
66
- )
67
-
68
-
69
- def write_srt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None):
70
- """
71
- Write a transcript to a file in SRT format.
72
- Example usage:
73
- from pathlib import Path
74
- from whisper.utils import write_srt
75
- result = transcribe(model, audio_path, temperature=temperature, **args)
76
- # save SRT
77
- audio_basename = Path(audio_path).stem
78
- with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
79
- write_srt(result["segments"], file=srt)
80
- """
81
- for i, segment in enumerate(transcript, start=1):
82
- text = process_text(segment['text'].strip(), maxLineWidth).replace('-->', '->')
83
-
84
- # write srt lines
85
- print(
86
- f"{i}\n"
87
- f"{format_timestamp(segment['start'], always_include_hours=True, fractionalSeperator=',')} --> "
88
- f"{format_timestamp(segment['end'], always_include_hours=True, fractionalSeperator=',')}\n"
89
- f"{text}\n",
90
- file=file,
91
- flush=True,
92
- )
93
-
94
- def process_text(text: str, maxLineWidth=None):
95
- if (maxLineWidth is None or maxLineWidth < 0):
96
- return text
97
-
98
- lines = textwrap.wrap(text, width=maxLineWidth, tabsize=4)
99
- return '\n'.join(lines)
100
-
101
- def slugify(value, allow_unicode=False):
102
- """
103
- Taken from https://github.com/django/django/blob/master/django/utils/text.py
104
- Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
105
- dashes to single dashes. Remove characters that aren't alphanumerics,
106
- underscores, or hyphens. Convert to lowercase. Also strip leading and
107
- trailing whitespace, dashes, and underscores.
108
- """
109
- value = str(value)
110
- if allow_unicode:
111
- value = unicodedata.normalize('NFKC', value)
112
- else:
113
- value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
114
- value = re.sub(r'[^\w\s-]', '', value.lower())
115
- return re.sub(r'[-\s]+', '-', value).strip('-_')
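Some spot-checks for `format_timestamp` as defined above (with the function in scope; the expected outputs follow from the arithmetic in its body):

```python
print(format_timestamp(3.5))                                # 00:03.500
print(format_timestamp(3661.0, always_include_hours=True))  # 01:01:01.000
print(format_timestamp(1.5, fractionalSeperator=','))       # 00:01,500 (SRT-style separator)
```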
spaces/Arnx/MusicGenXvAKN/audiocraft/modules/lstm.py DELETED
@@ -1,25 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from torch import nn
8
-
9
-
10
- class StreamableLSTM(nn.Module):
11
- """LSTM without worrying about the hidden state, nor the layout of the data.
12
- Expects input as convolutional layout.
13
- """
14
- def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True):
15
- super().__init__()
16
- self.skip = skip
17
- self.lstm = nn.LSTM(dimension, dimension, num_layers)
18
-
19
- def forward(self, x):
20
- x = x.permute(2, 0, 1)
21
- y, _ = self.lstm(x)
22
- if self.skip:
23
- y = y + x
24
- y = y.permute(1, 2, 0)
25
- return y
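A quick shape check for `StreamableLSTM`: it accepts and returns the convolutional layout `(batch, channels, time)`, permuting internally for the LSTM. Assuming the module is importable as `audiocraft.modules.lstm`:

```python
import torch
from audiocraft.modules.lstm import StreamableLSTM

lstm = StreamableLSTM(dimension=32)
x = torch.randn(4, 32, 100)  # (batch, channels, time)
y = lstm(x)
print(y.shape)  # torch.Size([4, 32, 100]) -- the residual skip keeps the shape
```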
spaces/Artples/Chat-with-Llama-2-70b/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Lauche-AI LEU-Chatbot
3
- emoji: ⚡
4
- colorFrom: gray
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.44.3
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/utf8prober.py DELETED
@@ -1,82 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # The Original Code is mozilla.org code.
3
- #
4
- # The Initial Developer of the Original Code is
5
- # Netscape Communications Corporation.
6
- # Portions created by the Initial Developer are Copyright (C) 1998
7
- # the Initial Developer. All Rights Reserved.
8
- #
9
- # Contributor(s):
10
- # Mark Pilgrim - port to Python
11
- #
12
- # This library is free software; you can redistribute it and/or
13
- # modify it under the terms of the GNU Lesser General Public
14
- # License as published by the Free Software Foundation; either
15
- # version 2.1 of the License, or (at your option) any later version.
16
- #
17
- # This library is distributed in the hope that it will be useful,
18
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
19
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20
- # Lesser General Public License for more details.
21
- #
22
- # You should have received a copy of the GNU Lesser General Public
23
- # License along with this library; if not, write to the Free Software
24
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25
- # 02110-1301 USA
26
- ######################### END LICENSE BLOCK #########################
27
-
28
- from typing import Union
29
-
30
- from .charsetprober import CharSetProber
31
- from .codingstatemachine import CodingStateMachine
32
- from .enums import MachineState, ProbingState
33
- from .mbcssm import UTF8_SM_MODEL
34
-
35
-
36
- class UTF8Prober(CharSetProber):
37
- ONE_CHAR_PROB = 0.5
38
-
39
- def __init__(self) -> None:
40
- super().__init__()
41
- self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
42
- self._num_mb_chars = 0
43
- self.reset()
44
-
45
- def reset(self) -> None:
46
- super().reset()
47
- self.coding_sm.reset()
48
- self._num_mb_chars = 0
49
-
50
- @property
51
- def charset_name(self) -> str:
52
- return "utf-8"
53
-
54
- @property
55
- def language(self) -> str:
56
- return ""
57
-
58
- def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
59
- for c in byte_str:
60
- coding_state = self.coding_sm.next_state(c)
61
- if coding_state == MachineState.ERROR:
62
- self._state = ProbingState.NOT_ME
63
- break
64
- if coding_state == MachineState.ITS_ME:
65
- self._state = ProbingState.FOUND_IT
66
- break
67
- if coding_state == MachineState.START:
68
- if self.coding_sm.get_current_charlen() >= 2:
69
- self._num_mb_chars += 1
70
-
71
- if self.state == ProbingState.DETECTING:
72
- if self.get_confidence() > self.SHORTCUT_THRESHOLD:
73
- self._state = ProbingState.FOUND_IT
74
-
75
- return self.state
76
-
77
- def get_confidence(self) -> float:
78
- unlike = 0.99
79
- if self._num_mb_chars < 6:
80
- unlike *= self.ONE_CHAR_PROB**self._num_mb_chars
81
- return 1.0 - unlike
82
- return unlike
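A small sketch of driving this prober directly (normally `chardet.detect` does this); six or more multi-byte sequences push the confidence to its 0.99 ceiling:

```python
from pip._vendor.chardet.utf8prober import UTF8Prober  # or chardet.utf8prober in the standalone package

prober = UTF8Prober()
prober.feed("naïve café héhé à".encode("utf-8"))  # 6 multi-byte characters
print(prober.charset_name, prober.get_confidence())  # utf-8 0.99
```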
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/diagnose.py DELETED
@@ -1,37 +0,0 @@
1
- import os
2
- import platform
3
-
4
- from pip._vendor.rich import inspect
5
- from pip._vendor.rich.console import Console, get_windows_console_features
6
- from pip._vendor.rich.panel import Panel
7
- from pip._vendor.rich.pretty import Pretty
8
-
9
-
10
- def report() -> None: # pragma: no cover
11
- """Print a report to the terminal with debugging information"""
12
- console = Console()
13
- inspect(console)
14
- features = get_windows_console_features()
15
- inspect(features)
16
-
17
- env_names = (
18
- "TERM",
19
- "COLORTERM",
20
- "CLICOLOR",
21
- "NO_COLOR",
22
- "TERM_PROGRAM",
23
- "COLUMNS",
24
- "LINES",
25
- "JUPYTER_COLUMNS",
26
- "JUPYTER_LINES",
27
- "JPY_PARENT_PID",
28
- "VSCODE_VERBOSE_LOGGING",
29
- )
30
- env = {name: os.getenv(name) for name in env_names}
31
- console.print(Panel.fit((Pretty(env)), title="[b]Environment Variables"))
32
-
33
- console.print(f'platform="{platform.system()}"')
34
-
35
-
36
- if __name__ == "__main__": # pragma: no cover
37
- report()
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/palette.py DELETED
@@ -1,100 +0,0 @@
-from math import sqrt
-from functools import lru_cache
-from typing import Sequence, Tuple, TYPE_CHECKING
-
-from .color_triplet import ColorTriplet
-
-if TYPE_CHECKING:
-    from pip._vendor.rich.table import Table
-
-
-class Palette:
-    """A palette of available colors."""
-
-    def __init__(self, colors: Sequence[Tuple[int, int, int]]):
-        self._colors = colors
-
-    def __getitem__(self, number: int) -> ColorTriplet:
-        return ColorTriplet(*self._colors[number])
-
-    def __rich__(self) -> "Table":
-        from pip._vendor.rich.color import Color
-        from pip._vendor.rich.style import Style
-        from pip._vendor.rich.text import Text
-        from pip._vendor.rich.table import Table
-
-        table = Table(
-            "index",
-            "RGB",
-            "Color",
-            title="Palette",
-            caption=f"{len(self._colors)} colors",
-            highlight=True,
-            caption_justify="right",
-        )
-        for index, color in enumerate(self._colors):
-            table.add_row(
-                str(index),
-                repr(color),
-                Text(" " * 16, style=Style(bgcolor=Color.from_rgb(*color))),
-            )
-        return table
-
-    # This is somewhat inefficient, so results are memoized via lru_cache
-    @lru_cache(maxsize=1024)
-    def match(self, color: Tuple[int, int, int]) -> int:
-        """Find a color from a palette that most closely matches a given color.
-
-        Args:
-            color (Tuple[int, int, int]): RGB components in range 0 to 255.
-
-        Returns:
-            int: Index of the closest matching color.
-        """
-        red1, green1, blue1 = color
-        _sqrt = sqrt
-        get_color = self._colors.__getitem__
-
-        def get_color_distance(index: int) -> float:
-            """Get the distance to a color."""
-            red2, green2, blue2 = get_color(index)
-            red_mean = (red1 + red2) // 2
-            red = red1 - red2
-            green = green1 - green2
-            blue = blue1 - blue2
-            return _sqrt(
-                (((512 + red_mean) * red * red) >> 8)
-                + 4 * green * green
-                + (((767 - red_mean) * blue * blue) >> 8)
-            )
-
-        min_index = min(range(len(self._colors)), key=get_color_distance)
-        return min_index
-
-
-if __name__ == "__main__":  # pragma: no cover
-    import colorsys
-    from typing import Iterable
-    from pip._vendor.rich.color import Color
-    from pip._vendor.rich.console import Console, ConsoleOptions
-    from pip._vendor.rich.segment import Segment
-    from pip._vendor.rich.style import Style
-
-    class ColorBox:
-        def __rich_console__(
-            self, console: Console, options: ConsoleOptions
-        ) -> Iterable[Segment]:
-            height = console.size.height - 3
-            for y in range(0, height):
-                for x in range(options.max_width):
-                    h = x / options.max_width
-                    l = y / (height + 1)
-                    r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
-                    r2, g2, b2 = colorsys.hls_to_rgb(h, l + (1 / height / 2), 1.0)
-                    bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
-                    color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
-                    yield Segment("▄", Style(color=color, bgcolor=bgcolor))
-                yield Segment.line()
-
-    console = Console()
-    console.print(ColorBox())
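The distance used by Palette.match above is the "redmean" weighted RGB metric; a standalone sketch of the same arithmetic, outside the class:

    from math import sqrt

    def redmean_distance(c1, c2):
        (r1, g1, b1), (r2, g2, b2) = c1, c2
        red_mean = (r1 + r2) // 2
        red, green, blue = r1 - r2, g1 - g2, b1 - b2
        return sqrt(
            (((512 + red_mean) * red * red) >> 8)
            + 4 * green * green
            + (((767 - red_mean) * blue * blue) >> 8)
        )

    print(redmean_distance((255, 0, 0), (250, 5, 5)))  # small: near-identical reds
    print(redmean_distance((255, 0, 0), (0, 0, 255)))  # large: opposite hues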
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/_macos_compat.py DELETED
@@ -1,12 +0,0 @@
-import sys
-import importlib
-
-
-def bypass_compiler_fixup(cmd, args):
-    return cmd
-
-
-if sys.platform == 'darwin':
-    compiler_fixup = importlib.import_module('_osx_support').compiler_fixup
-else:
-    compiler_fixup = bypass_compiler_fixup
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/build_py.py DELETED
@@ -1,368 +0,0 @@
-from functools import partial
-from glob import glob
-from distutils.util import convert_path
-import distutils.command.build_py as orig
-import os
-import fnmatch
-import textwrap
-import io
-import distutils.errors
-import itertools
-import stat
-import warnings
-from pathlib import Path
-from typing import Dict, Iterable, Iterator, List, Optional, Tuple
-
-from setuptools._deprecation_warning import SetuptoolsDeprecationWarning
-from setuptools.extern.more_itertools import unique_everseen
-
-
-def make_writable(target):
-    os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE)
-
-
-class build_py(orig.build_py):
-    """Enhanced 'build_py' command that includes data files with packages
-
-    The data files are specified via a 'package_data' argument to 'setup()'.
-    See 'setuptools.dist.Distribution' for more details.
-
-    Also, this version of the 'build_py' command allows you to specify both
-    'py_modules' and 'packages' in the same setup operation.
-    """
-    editable_mode: bool = False
-    existing_egg_info_dir: Optional[str] = None  #: Private API, internal use only.
-
-    def finalize_options(self):
-        orig.build_py.finalize_options(self)
-        self.package_data = self.distribution.package_data
-        self.exclude_package_data = self.distribution.exclude_package_data or {}
-        if 'data_files' in self.__dict__:
-            del self.__dict__['data_files']
-        self.__updated_files = []
-
-    def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1,
-                  link=None, level=1):
-        # Overwrite base class to allow using links
-        if link:
-            infile = str(Path(infile).resolve())
-            outfile = str(Path(outfile).resolve())
-        return super().copy_file(infile, outfile, preserve_mode, preserve_times,
-                                 link, level)
-
-    def run(self):
-        """Build modules, packages, and copy data files to build directory"""
-        if not (self.py_modules or self.packages) or self.editable_mode:
-            return
-
-        if self.py_modules:
-            self.build_modules()
-
-        if self.packages:
-            self.build_packages()
-            self.build_package_data()
-
-        # Only compile actual .py files, using our base class' idea of what our
-        # output files are.
-        self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
-
-    def __getattr__(self, attr):
-        "lazily compute data files"
-        if attr == 'data_files':
-            self.data_files = self._get_data_files()
-            return self.data_files
-        return orig.build_py.__getattr__(self, attr)
-
-    def build_module(self, module, module_file, package):
-        outfile, copied = orig.build_py.build_module(self, module, module_file, package)
-        if copied:
-            self.__updated_files.append(outfile)
-        return outfile, copied
-
-    def _get_data_files(self):
-        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
-        self.analyze_manifest()
-        return list(map(self._get_pkg_data_files, self.packages or ()))
-
-    def get_data_files_without_manifest(self):
-        """
-        Generate list of ``(package,src_dir,build_dir,filenames)`` tuples,
-        but without triggering any attempt to analyze or build the manifest.
-        """
-        # Prevent eventual errors from unset `manifest_files`
-        # (that would otherwise be set by `analyze_manifest`)
-        self.__dict__.setdefault('manifest_files', {})
-        return list(map(self._get_pkg_data_files, self.packages or ()))
-
-    def _get_pkg_data_files(self, package):
-        # Locate package source directory
-        src_dir = self.get_package_dir(package)
-
-        # Compute package build directory
-        build_dir = os.path.join(*([self.build_lib] + package.split('.')))
-
-        # Strip directory from globbed filenames
-        filenames = [
-            os.path.relpath(file, src_dir)
-            for file in self.find_data_files(package, src_dir)
-        ]
-        return package, src_dir, build_dir, filenames
-
-    def find_data_files(self, package, src_dir):
-        """Return filenames for package's data files in 'src_dir'"""
-        patterns = self._get_platform_patterns(
-            self.package_data,
-            package,
-            src_dir,
-        )
-        globs_expanded = map(partial(glob, recursive=True), patterns)
-        # flatten the expanded globs into an iterable of matches
-        globs_matches = itertools.chain.from_iterable(globs_expanded)
-        glob_files = filter(os.path.isfile, globs_matches)
-        files = itertools.chain(
-            self.manifest_files.get(package, []),
-            glob_files,
-        )
-        return self.exclude_data_files(package, src_dir, files)
-
-    def get_outputs(self, include_bytecode=1) -> List[str]:
-        """See :class:`setuptools.commands.build.SubCommand`"""
-        if self.editable_mode:
-            return list(self.get_output_mapping().keys())
-        return super().get_outputs(include_bytecode)
-
-    def get_output_mapping(self) -> Dict[str, str]:
-        """See :class:`setuptools.commands.build.SubCommand`"""
-        mapping = itertools.chain(
-            self._get_package_data_output_mapping(),
-            self._get_module_mapping(),
-        )
-        return dict(sorted(mapping, key=lambda x: x[0]))
-
-    def _get_module_mapping(self) -> Iterator[Tuple[str, str]]:
-        """Iterate over all modules producing (dest, src) pairs."""
-        for (package, module, module_file) in self.find_all_modules():
-            package = package.split('.')
-            filename = self.get_module_outfile(self.build_lib, package, module)
-            yield (filename, module_file)
-
-    def _get_package_data_output_mapping(self) -> Iterator[Tuple[str, str]]:
-        """Iterate over package data producing (dest, src) pairs."""
-        for package, src_dir, build_dir, filenames in self.data_files:
-            for filename in filenames:
-                target = os.path.join(build_dir, filename)
-                srcfile = os.path.join(src_dir, filename)
-                yield (target, srcfile)
-
-    def build_package_data(self):
-        """Copy data files into build directory"""
-        for target, srcfile in self._get_package_data_output_mapping():
-            self.mkpath(os.path.dirname(target))
-            _outf, _copied = self.copy_file(srcfile, target)
-            make_writable(target)
-
-    def analyze_manifest(self):
-        self.manifest_files = mf = {}
-        if not self.distribution.include_package_data:
-            return
-        src_dirs = {}
-        for package in self.packages or ():
-            # Locate package source directory
-            src_dirs[assert_relative(self.get_package_dir(package))] = package
-
-        if (
-            getattr(self, 'existing_egg_info_dir', None)
-            and Path(self.existing_egg_info_dir, "SOURCES.txt").exists()
-        ):
-            egg_info_dir = self.existing_egg_info_dir
-            manifest = Path(egg_info_dir, "SOURCES.txt")
-            files = manifest.read_text(encoding="utf-8").splitlines()
-        else:
-            self.run_command('egg_info')
-            ei_cmd = self.get_finalized_command('egg_info')
-            egg_info_dir = ei_cmd.egg_info
-            files = ei_cmd.filelist.files
-
-        check = _IncludePackageDataAbuse()
-        for path in self._filter_build_files(files, egg_info_dir):
-            d, f = os.path.split(assert_relative(path))
-            prev = None
-            oldf = f
-            while d and d != prev and d not in src_dirs:
-                prev = d
-                d, df = os.path.split(d)
-                f = os.path.join(df, f)
-            if d in src_dirs:
-                if f == oldf:
-                    if check.is_module(f):
-                        continue  # it's a module, not data
-                else:
-                    importable = check.importable_subpackage(src_dirs[d], f)
-                    if importable:
-                        check.warn(importable)
-                mf.setdefault(src_dirs[d], []).append(path)
-
-    def _filter_build_files(self, files: Iterable[str], egg_info: str) -> Iterator[str]:
-        """
-        ``build_meta`` may try to create egg_info outside of the project directory,
-        and this can be problematic for certain plugins (reported in issue #3500).
-
-        Extensions might also include, among their sources, files created in the
-        ``build_lib`` and ``build_temp`` directories.
-
-        This function should filter this case of invalid files out.
-        """
-        build = self.get_finalized_command("build")
-        build_dirs = (egg_info, self.build_lib, build.build_temp, build.build_base)
-        norm_dirs = [os.path.normpath(p) for p in build_dirs if p]
-
-        for file in files:
-            norm_path = os.path.normpath(file)
-            if not os.path.isabs(file) or all(d not in norm_path for d in norm_dirs):
-                yield file
-
-    def get_data_files(self):
-        pass  # Lazily compute data files in _get_data_files() function.
-
-    def check_package(self, package, package_dir):
-        """Check namespace packages' __init__ for declare_namespace"""
-        try:
-            return self.packages_checked[package]
-        except KeyError:
-            pass
-
-        init_py = orig.build_py.check_package(self, package, package_dir)
-        self.packages_checked[package] = init_py
-
-        if not init_py or not self.distribution.namespace_packages:
-            return init_py
-
-        for pkg in self.distribution.namespace_packages:
-            if pkg == package or pkg.startswith(package + '.'):
-                break
-        else:
-            return init_py
-
-        with io.open(init_py, 'rb') as f:
-            contents = f.read()
-        if b'declare_namespace' not in contents:
-            raise distutils.errors.DistutilsError(
-                "Namespace package problem: %s is a namespace package, but "
-                "its\n__init__.py does not call declare_namespace()! Please "
-                'fix it.\n(See the setuptools manual under '
-                '"Namespace Packages" for details.)\n"' % (package,)
-            )
-        return init_py
-
-    def initialize_options(self):
-        self.packages_checked = {}
-        orig.build_py.initialize_options(self)
-        self.editable_mode = False
-        self.existing_egg_info_dir = None
-
-    def get_package_dir(self, package):
-        res = orig.build_py.get_package_dir(self, package)
-        if self.distribution.src_root is not None:
-            return os.path.join(self.distribution.src_root, res)
-        return res
-
-    def exclude_data_files(self, package, src_dir, files):
-        """Filter filenames for package's data files in 'src_dir'"""
-        files = list(files)
-        patterns = self._get_platform_patterns(
-            self.exclude_package_data,
-            package,
-            src_dir,
-        )
-        match_groups = (fnmatch.filter(files, pattern) for pattern in patterns)
-        # flatten the groups of matches into an iterable of matches
-        matches = itertools.chain.from_iterable(match_groups)
-        bad = set(matches)
-        keepers = (fn for fn in files if fn not in bad)
-        # ditch dupes
-        return list(unique_everseen(keepers))
-
-    @staticmethod
-    def _get_platform_patterns(spec, package, src_dir):
-        """
-        yield platform-specific path patterns (suitable for glob
-        or fn_match) from a glob-based spec (such as
-        self.package_data or self.exclude_package_data)
-        matching package in src_dir.
-        """
-        raw_patterns = itertools.chain(
-            spec.get('', []),
-            spec.get(package, []),
-        )
-        return (
-            # Each pattern has to be converted to a platform-specific path
-            os.path.join(src_dir, convert_path(pattern))
-            for pattern in raw_patterns
-        )
-
-
-def assert_relative(path):
-    if not os.path.isabs(path):
-        return path
-    from distutils.errors import DistutilsSetupError
-
-    msg = (
-        textwrap.dedent(
-            """
-            Error: setup script specifies an absolute path:
-
-                %s
-
-            setup() arguments must *always* be /-separated paths relative to the
-            setup.py directory, *never* absolute paths.
-            """
-        ).lstrip()
-        % path
-    )
-    raise DistutilsSetupError(msg)
-
-
-class _IncludePackageDataAbuse:
-    """Inform users that package or module is included as 'data file'"""
-
-    MESSAGE = """\
-    Installing {importable!r} as data is deprecated, please list it in `packages`.
-    !!\n\n
-    ############################
-    # Package would be ignored #
-    ############################
-    Python recognizes {importable!r} as an importable package,
-    but it is not listed in the `packages` configuration of setuptools.
-
-    {importable!r} has been automatically added to the distribution only
-    because it may contain data files, but this behavior is likely to change
-    in future versions of setuptools (and therefore is considered deprecated).
-
-    Please make sure that {importable!r} is included as a package by using
-    the `packages` configuration field or the proper discovery methods
-    (for example by using `find_namespace_packages(...)`/`find_namespace:`
-    instead of `find_packages(...)`/`find:`).
-
-    You can read more about "package discovery" and "data files" on setuptools
-    documentation page.
-    \n\n!!
-    """
-
-    def __init__(self):
-        self._already_warned = set()
-
-    def is_module(self, file):
-        return file.endswith(".py") and file[:-len(".py")].isidentifier()
-
-    def importable_subpackage(self, parent, file):
-        pkg = Path(file).parent
-        parts = list(itertools.takewhile(str.isidentifier, pkg.parts))
-        if parts:
-            return ".".join([parent, *parts])
-        return None
-
-    def warn(self, importable):
-        if importable not in self._already_warned:
-            msg = textwrap.dedent(self.MESSAGE).format(importable=importable)
-            warnings.warn(msg, SetuptoolsDeprecationWarning, stacklevel=2)
-            self._already_warned.add(importable)
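The _get_platform_patterns staticmethod above is the core of package_data resolution; a small sketch of what it yields for a hypothetical spec (package name and paths invented for illustration):

    import itertools
    import os
    from distutils.util import convert_path

    spec = {"": ["*.txt"], "mypkg": ["data/*.json"]}  # hypothetical package_data
    package, src_dir = "mypkg", os.path.join("src", "mypkg")
    raw_patterns = itertools.chain(spec.get("", []), spec.get(package, []))
    patterns = [os.path.join(src_dir, convert_path(p)) for p in raw_patterns]
    print(patterns)  # ['src/mypkg/*.txt', 'src/mypkg/data/*.json'] on POSIX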
 
spaces/BIASLab/sars-cov-2-classification-fcgr/src/utils.py DELETED
@@ -1,41 +0,0 @@
-import re
-from PIL import Image
-import numpy as np
-
-
-def clean_seq(seq):
-    "Replace all characters other than A, C, G, T or N with N"
-    seq = seq.upper()
-    for letter in "BDEFHIJKLMOPQRSUVWXYZ":
-        seq = seq.replace(letter, "N")
-    return seq
-
-def array2img(array):
-    "FCGR array to grayscale image"
-    max_color = 255
-    m, M = array.min(), array.max()
-    # rescale to [0,1]
-    img_rescaled = (array - m) / (M - m)
-
-    # invert colors black->white
-    img_array = np.ceil(max_color - img_rescaled * max_color)
-    # 'L' mode expects unsigned 8-bit values; int8 would overflow above 127
-    img_array = np.array(img_array, dtype=np.uint8)
-
-    # convert to Image
-    img_pil = Image.fromarray(img_array, 'L')
-    return img_pil
-
-def count_seqs(fasta):
-    "Count the number of '>' headers in a FASTA file, for use with a progress bar"
-    pattern = ">"
-    count = 0
-    for line in fasta:
-        if re.search(pattern, line):
-            count += 1
-    return count
-
-def generate_fcgr(kmer, fasta, fcgr):
-    "Generate an FCGR image from a FASTA record"
-    array = fcgr(clean_seq(str(fasta.seq)))
-    img = array2img(array)
-    return img
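A minimal usage sketch for the helpers above, with a synthetic count matrix standing in for a real FCGR array (the data and output path are hypothetical, and array2img is assumed importable from this module):

    import numpy as np

    array = np.random.default_rng(0).integers(0, 500, size=(64, 64)).astype(float)
    img = array2img(array)        # white background, darker pixels = higher counts
    img.save("fcgr_example.png")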
 
spaces/Banbri/zcvzcv/src/types.ts DELETED
@@ -1,130 +0,0 @@
-export type ProjectionMode = 'cartesian' | 'spherical'
-
-export type CacheMode = "use" | "renew" | "ignore"
-
-export interface RenderRequest {
-  prompt: string
-
-  // whether to use video segmentation
-  // disabled (default)
-  // firstframe: we only analyze the first frame
-  // allframes: we analyze all the frames
-  segmentation: 'disabled' | 'firstframe' | 'allframes'
-
-  // segmentation will only be executed if we have a non-empty list of actionnables
-  // actionnables are names of things like "chest", "key", "tree", "chair" etc
-  actionnables: string[]
-
-  // note: this is the number of frames for Zeroscope,
-  // which is currently configured to only output 3 seconds, so:
-  // nbFrames=8 -> 1 sec
-  // nbFrames=16 -> 2 sec
-  // nbFrames=24 -> 3 sec
-  nbFrames: number // min: 1, max: 24
-
-  nbSteps: number // min: 1, max: 50
-
-  seed: number
-
-  width: number // fixed at 1024 for now
-  height: number // fixed at 512 for now
-
-  // upscaling factor
-  // 0: no upscaling
-  // 1: no upscaling
-  // 2: 2x larger
-  // 3: 3x larger
-  // 4: 4x larger, up to 4096x4096 (warning: a PNG of this size can be 50 Mb!)
-  upscalingFactor: number
-
-  projection: ProjectionMode
-
-  cache: CacheMode
-
-  wait: boolean // wait until the job is completed
-
-  analyze: boolean // analyze the image to generate a caption (optional)
-}
-
-export interface ImageSegment {
-  id: number
-  box: number[]
-  color: number[]
-  label: string
-  score: number
-}
-
-export type RenderedSceneStatus =
-  | "pending"
-  | "completed"
-  | "error"
-
-export interface RenderedScene {
-  renderId: string
-  status: RenderedSceneStatus
-  assetUrl: string
-  alt: string
-  error: string
-  maskUrl: string
-  segments: ImageSegment[]
-}
-
-export interface ImageAnalysisRequest {
-  image: string // in base64
-  prompt: string
-}
-
-export interface ImageAnalysisResponse {
-  result: string
-  error?: string
-}
-
-export type LLMResponse = Array<{panel: number; instructions: string; caption: string }>
-
-export type LLMEngine =
-  | "INFERENCE_API"
-  | "INFERENCE_ENDPOINT"
-  | "OPENAI"
-  | "REPLICATE"
-
-export type RenderingEngine =
-  | "VIDEOCHAIN"
-  | "OPENAI"
-  | "REPLICATE"
-  | "INFERENCE_API"
-  | "INFERENCE_ENDPOINT"
-
-export type PostVisibility =
-  | "featured" // featured by admins
-  | "trending" // top trending / received more than 10 upvotes
-  | "normal" // default visibility
-
-export type Post = {
-  postId: string
-  appId: string
-  prompt: string
-  previewUrl: string
-  assetUrl: string
-  createdAt: string
-  visibility: PostVisibility
-  upvotes: number
-  downvotes: number
-}
-
-export type CreatePostResponse = {
-  success?: boolean
-  error?: string
-  post: Post
-}
-
-export type GetAppPostsResponse = {
-  success?: boolean
-  error?: string
-  posts: Post[]
-}
-
-export type GetAppPostResponse = {
-  success?: boolean
-  error?: string
-  post: Post
-}
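An illustrative RenderRequest payload matching the interface above, written as a Python dict standing in for the JSON body (all values are hypothetical):

    render_request = {
        "prompt": "a treasure chest in a forest clearing",
        "segmentation": "firstframe",
        "actionnables": ["chest", "tree"],
        "nbFrames": 16,  # ~2 sec per the mapping documented above
        "nbSteps": 30,
        "seed": 42,
        "width": 1024,
        "height": 512,
        "upscalingFactor": 2,
        "projection": "cartesian",
        "cache": "use",
        "wait": True,
        "analyze": False,
    }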
 
spaces/BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator/pages/03_📝_Upload_Video_File_and_Transcript.py DELETED
@@ -1,130 +0,0 @@
-import streamlit as st
-from streamlit_lottie import st_lottie
-from utils import write_vtt, write_srt
-import ffmpeg
-import requests
-from typing import Iterator
-from io import StringIO
-import numpy as np
-import pathlib
-import os
-
-
-st.set_page_config(page_title="Auto Subtitled Video Generator", page_icon=":movie_camera:", layout="wide")
-
-# Define a function that we can use to load lottie files from a link.
-@st.cache(allow_output_mutation=True)
-def load_lottieurl(url: str):
-    r = requests.get(url)
-    if r.status_code != 200:
-        return None
-    return r.json()
-
-
-APP_DIR = pathlib.Path(__file__).parent.absolute()
-
-LOCAL_DIR = APP_DIR / "local_transcript"
-LOCAL_DIR.mkdir(exist_ok=True)
-save_dir = LOCAL_DIR / "output"
-save_dir.mkdir(exist_ok=True)
-
-
-col1, col2 = st.columns([1, 3])
-with col1:
-    lottie = load_lottieurl("https://assets6.lottiefiles.com/packages/lf20_cjnxwrkt.json")
-    st_lottie(lottie)
-
-with col2:
-    st.write("""
-    ## Auto Subtitled Video Generator
-    ##### ➠ Upload a video file and a transcript as an .srt or .vtt file to get a video with subtitles.
-    ##### ➠ Processing time will increase as the video length increases. """)
-
-
-def getSubs(segments: Iterator[dict], format: str, maxLineWidth: int) -> str:
-    segmentStream = StringIO()
-
-    if format == 'vtt':
-        write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
-    elif format == 'srt':
-        write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
-    else:
-        raise Exception("Unknown format " + format)
-
-    segmentStream.seek(0)
-    return segmentStream.read()
-
-
-def split_video_audio(uploaded_file):
-    with open(f"{save_dir}/input.mp4", "wb") as f:
-        f.write(uploaded_file.read())
-    audio = ffmpeg.input(f"{save_dir}/input.mp4")
-    audio = ffmpeg.output(audio, f"{save_dir}/output.wav", acodec="pcm_s16le", ac=1, ar="16k")
-    ffmpeg.run(audio, overwrite_output=True)
-
-
-def main():
-    uploaded_video = st.file_uploader("Upload Video File", type=["mp4", "avi", "mov", "mkv"])
-    # get the name of the input file, without its extension
-    if uploaded_video is not None:
-        filename = uploaded_video.name[:-4]
-    else:
-        filename = None
-    transcript_file = st.file_uploader("Upload Transcript File", type=["srt", "vtt"])
-    if transcript_file is not None:
-        transcript_name = transcript_file.name
-    else:
-        transcript_name = None
-    if uploaded_video is not None and transcript_file is not None:
-        if transcript_name[-3:] == "vtt":
-            # the redundant f.close() calls are unnecessary inside a `with` block
-            with open("uploaded_transcript.vtt", "wb") as f:
-                f.writelines(transcript_file)
-            with open(os.path.join(os.getcwd(), "uploaded_transcript.vtt"), "rb") as f:
-                vtt_file = f.read()
-            if st.button("Generate Video with Subtitles"):
-                with st.spinner("Generating Subtitled Video"):
-                    split_video_audio(uploaded_video)
-                    video_file = ffmpeg.input(f"{save_dir}/input.mp4")
-                    audio_file = ffmpeg.input(f"{save_dir}/output.wav")
-                    ffmpeg.concat(video_file.filter("subtitles", "uploaded_transcript.vtt"), audio_file, v=1, a=1).output("final.mp4").global_args('-report').run(quiet=True, overwrite_output=True)
-                    video_with_subs = open("final.mp4", "rb")
-                    col3, col4 = st.columns(2)
-                    with col3:
-                        st.video(uploaded_video)
-                    with col4:
-                        st.video(video_with_subs)
-                        st.download_button(label="Download Video with Subtitles",
-                                           data=video_with_subs,
-                                           file_name=f"{filename}_with_subs.mp4")
-
-        elif transcript_name[-3:] == "srt":
-            with open("uploaded_transcript.srt", "wb") as f:
-                f.writelines(transcript_file)
-            with open(os.path.join(os.getcwd(), "uploaded_transcript.srt"), "rb") as f:
-                srt_file = f.read()
-            if st.button("Generate Video with Subtitles"):
-                with st.spinner("Generating Subtitled Video"):
-                    split_video_audio(uploaded_video)
-                    video_file = ffmpeg.input(f"{save_dir}/input.mp4")
-                    audio_file = ffmpeg.input(f"{save_dir}/output.wav")
-                    ffmpeg.concat(video_file.filter("subtitles", "uploaded_transcript.srt"), audio_file, v=1, a=1).output("final.mp4").run(quiet=True, overwrite_output=True)
-                    video_with_subs = open("final.mp4", "rb")
-                    col3, col4 = st.columns(2)
-                    with col3:
-                        st.video(uploaded_video)
-                    with col4:
-                        st.video(video_with_subs)
-                        st.download_button(label="Download Video with Subtitles",
-                                           data=video_with_subs,
-                                           file_name=f"{filename}_with_subs.mp4")
-        else:
-            st.error("Please upload a .srt or .vtt file")
-    else:
-        st.info("Please upload a video file and a transcript file")
-
-
-if __name__ == "__main__":
-    main()
-
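The burn-in step inside main() above, reduced to a standalone ffmpeg-python sketch (file paths are hypothetical):

    import ffmpeg

    video = ffmpeg.input("input.mp4")
    audio = ffmpeg.input("output.wav")
    (
        ffmpeg.concat(video.filter("subtitles", "transcript.srt"), audio, v=1, a=1)
        .output("final.mp4")
        .run(overwrite_output=True)
    )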
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/httpsession.py DELETED
@@ -1,510 +0,0 @@
-import logging
-import os
-import os.path
-import socket
-import sys
-import warnings
-from base64 import b64encode
-
-from urllib3 import PoolManager, Timeout, proxy_from_url
-from urllib3.exceptions import (
-    ConnectTimeoutError as URLLib3ConnectTimeoutError,
-)
-from urllib3.exceptions import (
-    LocationParseError,
-    NewConnectionError,
-    ProtocolError,
-    ProxyError,
-)
-from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError
-from urllib3.exceptions import SSLError as URLLib3SSLError
-from urllib3.util.retry import Retry
-from urllib3.util.ssl_ import (
-    OP_NO_COMPRESSION,
-    PROTOCOL_TLS,
-    OP_NO_SSLv2,
-    OP_NO_SSLv3,
-    is_ipaddress,
-    ssl,
-)
-from urllib3.util.url import parse_url
-
-try:
-    from urllib3.util.ssl_ import OP_NO_TICKET, PROTOCOL_TLS_CLIENT
-except ImportError:
-    # Fallback directly to ssl for version of urllib3 before 1.26.
-    # They are available in the standard library starting in Python 3.6.
-    from ssl import OP_NO_TICKET, PROTOCOL_TLS_CLIENT
-
-try:
-    # pyopenssl will be removed in urllib3 2.0, we'll fall back to ssl_ at that point.
-    # This can be removed once our urllib3 floor is raised to >= 2.0.
-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore", category=DeprecationWarning)
-        # Always import the original SSLContext, even if it has been patched
-        from urllib3.contrib.pyopenssl import (
-            orig_util_SSLContext as SSLContext,
-        )
-except ImportError:
-    from urllib3.util.ssl_ import SSLContext
-
-try:
-    from urllib3.util.ssl_ import DEFAULT_CIPHERS
-except ImportError:
-    # Defer to system configuration starting with
-    # urllib3 2.0. This will choose the ciphers provided by
-    # Openssl 1.1.1+ or secure system defaults.
-    DEFAULT_CIPHERS = None
-
-import botocore.awsrequest
-from botocore.compat import (
-    IPV6_ADDRZ_RE,
-    ensure_bytes,
-    filter_ssl_warnings,
-    unquote,
-    urlparse,
-)
-from botocore.exceptions import (
-    ConnectionClosedError,
-    ConnectTimeoutError,
-    EndpointConnectionError,
-    HTTPClientError,
-    InvalidProxiesConfigError,
-    ProxyConnectionError,
-    ReadTimeoutError,
-    SSLError,
-)
-
-filter_ssl_warnings()
-logger = logging.getLogger(__name__)
-DEFAULT_TIMEOUT = 60
-MAX_POOL_CONNECTIONS = 10
-DEFAULT_CA_BUNDLE = os.path.join(os.path.dirname(__file__), 'cacert.pem')
-
-try:
-    from certifi import where
-except ImportError:
-
-    def where():
-        return DEFAULT_CA_BUNDLE
-
-
-def get_cert_path(verify):
-    if verify is not True:
-        return verify
-
-    cert_path = where()
-    logger.debug(f"Certificate path: {cert_path}")
-
-    return cert_path
-
-
-def create_urllib3_context(
-    ssl_version=None, cert_reqs=None, options=None, ciphers=None
-):
-    """This function is a vendored version of the same function in urllib3
-
-    We vendor this function to ensure that the SSL contexts we construct
-    always use the std lib SSLContext instead of pyopenssl.
-    """
-    # PROTOCOL_TLS is deprecated in Python 3.10
-    if not ssl_version or ssl_version == PROTOCOL_TLS:
-        ssl_version = PROTOCOL_TLS_CLIENT
-
-    context = SSLContext(ssl_version)
-
-    if ciphers:
-        context.set_ciphers(ciphers)
-    elif DEFAULT_CIPHERS:
-        context.set_ciphers(DEFAULT_CIPHERS)
-
-    # Setting the default here, as we may have no ssl module on import
-    cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
-
-    if options is None:
-        options = 0
-        # SSLv2 is easily broken and is considered harmful and dangerous
-        options |= OP_NO_SSLv2
-        # SSLv3 has several problems and is now dangerous
-        options |= OP_NO_SSLv3
-        # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
-        # (issue urllib3#309)
-        options |= OP_NO_COMPRESSION
-        # TLSv1.2 only. Unless set explicitly, do not request tickets.
-        # This may save some bandwidth on wire, and although the ticket is encrypted,
-        # there is a risk associated with it being on wire,
-        # if the server is not rotating its ticketing keys properly.
-        options |= OP_NO_TICKET
-
-    context.options |= options
-
-    # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
-    # necessary for conditional client cert authentication with TLS 1.3.
-    # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older
-    # versions of Python. We only enable on Python 3.7.4+ or if certificate
-    # verification is enabled to work around Python issue #37428
-    # See: https://bugs.python.org/issue37428
-    if (
-        cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)
-    ) and getattr(context, "post_handshake_auth", None) is not None:
-        context.post_handshake_auth = True
-
-    def disable_check_hostname():
-        if (
-            getattr(context, "check_hostname", None) is not None
-        ):  # Platform-specific: Python 3.2
-            # We do our own verification, including fingerprints and alternative
-            # hostnames. So disable it here
-            context.check_hostname = False
-
-    # The order of the below lines setting verify_mode and check_hostname
-    # matter due to safe-guards SSLContext has to prevent an SSLContext with
-    # check_hostname=True, verify_mode=NONE/OPTIONAL. This is made even more
-    # complex because we don't know whether PROTOCOL_TLS_CLIENT will be used
-    # or not so we don't know the initial state of the freshly created SSLContext.
-    if cert_reqs == ssl.CERT_REQUIRED:
-        context.verify_mode = cert_reqs
-        disable_check_hostname()
-    else:
-        disable_check_hostname()
-        context.verify_mode = cert_reqs
-
-    # Enable logging of TLS session keys via defacto standard environment variable
-    # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.
-    if hasattr(context, "keylog_filename"):
-        sslkeylogfile = os.environ.get("SSLKEYLOGFILE")
-        if sslkeylogfile and not sys.flags.ignore_environment:
-            context.keylog_filename = sslkeylogfile
-
-    return context
-
-
-def ensure_boolean(val):
-    """Ensures a boolean value if a string or boolean is provided
-
-    For strings, the value for True/False is case insensitive
-    """
-    if isinstance(val, bool):
-        return val
-    else:
-        return val.lower() == 'true'
-
-
-def mask_proxy_url(proxy_url):
-    """
-    Mask proxy url credentials.
-
-    :type proxy_url: str
-    :param proxy_url: The proxy url, i.e. https://username:[email protected]
-
-    :return: Masked proxy url, i.e. https://***:***@proxy.com
-    """
-    mask = '*' * 3
-    parsed_url = urlparse(proxy_url)
-    if parsed_url.username:
-        proxy_url = proxy_url.replace(parsed_url.username, mask, 1)
-    if parsed_url.password:
-        proxy_url = proxy_url.replace(parsed_url.password, mask, 1)
-    return proxy_url
-
-
-def _is_ipaddress(host):
-    """Wrap urllib3's is_ipaddress to support bracketed IPv6 addresses."""
-    return is_ipaddress(host) or bool(IPV6_ADDRZ_RE.match(host))
-
-
-class ProxyConfiguration:
-    """Represents a proxy configuration dictionary and additional settings.
-
-    This class represents a proxy configuration dictionary and provides utility
-    functions to retrieve well structured proxy urls and proxy headers from the
-    proxy configuration dictionary.
-    """
-
-    def __init__(self, proxies=None, proxies_settings=None):
-        if proxies is None:
-            proxies = {}
-        if proxies_settings is None:
-            proxies_settings = {}
-
-        self._proxies = proxies
-        self._proxies_settings = proxies_settings
-
-    def proxy_url_for(self, url):
-        """Retrieves the corresponding proxy url for a given url."""
-        parsed_url = urlparse(url)
-        proxy = self._proxies.get(parsed_url.scheme)
-        if proxy:
-            proxy = self._fix_proxy_url(proxy)
-        return proxy
-
-    def proxy_headers_for(self, proxy_url):
-        """Retrieves the corresponding proxy headers for a given proxy url."""
-        headers = {}
-        username, password = self._get_auth_from_url(proxy_url)
-        if username and password:
-            basic_auth = self._construct_basic_auth(username, password)
-            headers['Proxy-Authorization'] = basic_auth
-        return headers
-
-    @property
-    def settings(self):
-        return self._proxies_settings
-
-    def _fix_proxy_url(self, proxy_url):
-        if proxy_url.startswith('http:') or proxy_url.startswith('https:'):
-            return proxy_url
-        elif proxy_url.startswith('//'):
-            return 'http:' + proxy_url
-        else:
-            return 'http://' + proxy_url
-
-    def _construct_basic_auth(self, username, password):
-        auth_str = f'{username}:{password}'
-        encoded_str = b64encode(auth_str.encode('ascii')).strip().decode()
-        return f'Basic {encoded_str}'
-
-    def _get_auth_from_url(self, url):
-        parsed_url = urlparse(url)
-        try:
-            return unquote(parsed_url.username), unquote(parsed_url.password)
-        except (AttributeError, TypeError):
-            return None, None
-
-
-class URLLib3Session:
-    """A basic HTTP client that supports connection pooling and proxies.
-
-    This class is inspired by requests.adapters.HTTPAdapter, but has been
-    boiled down to meet the use cases needed by botocore. For the most part
-    this class matches the functionality of HTTPAdapter in requests v2.7.0
-    (the same as our vendored version). The only major difference of note is
-    that we currently do not support sending chunked requests. While requests
-    v2.7.0 implemented this themselves, later version urllib3 support this
-    directly via a flag to urlopen so enabling it if needed should be trivial.
-    """
-
-    def __init__(
-        self,
-        verify=True,
-        proxies=None,
-        timeout=None,
-        max_pool_connections=MAX_POOL_CONNECTIONS,
-        socket_options=None,
-        client_cert=None,
-        proxies_config=None,
-    ):
-        self._verify = verify
-        self._proxy_config = ProxyConfiguration(
-            proxies=proxies, proxies_settings=proxies_config
-        )
-        self._pool_classes_by_scheme = {
-            'http': botocore.awsrequest.AWSHTTPConnectionPool,
-            'https': botocore.awsrequest.AWSHTTPSConnectionPool,
-        }
-        if timeout is None:
-            timeout = DEFAULT_TIMEOUT
-        if not isinstance(timeout, (int, float)):
-            timeout = Timeout(connect=timeout[0], read=timeout[1])
-
-        self._cert_file = None
-        self._key_file = None
-        if isinstance(client_cert, str):
-            self._cert_file = client_cert
-        elif isinstance(client_cert, tuple):
-            self._cert_file, self._key_file = client_cert
-
-        self._timeout = timeout
-        self._max_pool_connections = max_pool_connections
-        self._socket_options = socket_options
-        if socket_options is None:
-            self._socket_options = []
-        self._proxy_managers = {}
-        self._manager = PoolManager(**self._get_pool_manager_kwargs())
-        self._manager.pool_classes_by_scheme = self._pool_classes_by_scheme
-
-    def _proxies_kwargs(self, **kwargs):
-        proxies_settings = self._proxy_config.settings
-        proxies_kwargs = {
-            'use_forwarding_for_https': proxies_settings.get(
-                'proxy_use_forwarding_for_https'
-            ),
-            **kwargs,
-        }
-        return {k: v for k, v in proxies_kwargs.items() if v is not None}
-
-    def _get_pool_manager_kwargs(self, **extra_kwargs):
-        pool_manager_kwargs = {
-            'strict': True,
-            'timeout': self._timeout,
-            'maxsize': self._max_pool_connections,
-            'ssl_context': self._get_ssl_context(),
-            'socket_options': self._socket_options,
-            'cert_file': self._cert_file,
-            'key_file': self._key_file,
-        }
-        pool_manager_kwargs.update(**extra_kwargs)
-        return pool_manager_kwargs
-
-    def _get_ssl_context(self):
-        return create_urllib3_context()
-
-    def _get_proxy_manager(self, proxy_url):
-        if proxy_url not in self._proxy_managers:
-            proxy_headers = self._proxy_config.proxy_headers_for(proxy_url)
-            proxy_ssl_context = self._setup_proxy_ssl_context(proxy_url)
-            proxy_manager_kwargs = self._get_pool_manager_kwargs(
-                proxy_headers=proxy_headers
-            )
-            proxy_manager_kwargs.update(
-                self._proxies_kwargs(proxy_ssl_context=proxy_ssl_context)
-            )
-            proxy_manager = proxy_from_url(proxy_url, **proxy_manager_kwargs)
-            proxy_manager.pool_classes_by_scheme = self._pool_classes_by_scheme
-            self._proxy_managers[proxy_url] = proxy_manager
-
-        return self._proxy_managers[proxy_url]
-
-    def _path_url(self, url):
-        parsed_url = urlparse(url)
-        path = parsed_url.path
-        if not path:
-            path = '/'
-        if parsed_url.query:
-            path = path + '?' + parsed_url.query
-        return path
-
-    def _setup_ssl_cert(self, conn, url, verify):
-        if url.lower().startswith('https') and verify:
-            conn.cert_reqs = 'CERT_REQUIRED'
-            conn.ca_certs = get_cert_path(verify)
-        else:
-            conn.cert_reqs = 'CERT_NONE'
-            conn.ca_certs = None
-
-    def _setup_proxy_ssl_context(self, proxy_url):
-        proxies_settings = self._proxy_config.settings
-        proxy_ca_bundle = proxies_settings.get('proxy_ca_bundle')
-        proxy_cert = proxies_settings.get('proxy_client_cert')
-        if proxy_ca_bundle is None and proxy_cert is None:
-            return None
-
-        context = self._get_ssl_context()
-        try:
-            url = parse_url(proxy_url)
-            # urllib3 disables this by default but we need it for proper
-            # proxy tls negotiation when proxy_url is not an IP Address
-            if not _is_ipaddress(url.host):
-                context.check_hostname = True
-            if proxy_ca_bundle is not None:
-                context.load_verify_locations(cafile=proxy_ca_bundle)
-
-            if isinstance(proxy_cert, tuple):
-                context.load_cert_chain(proxy_cert[0], keyfile=proxy_cert[1])
-            elif isinstance(proxy_cert, str):
-                context.load_cert_chain(proxy_cert)
-
-            return context
-        except (OSError, URLLib3SSLError, LocationParseError) as e:
-            raise InvalidProxiesConfigError(error=e)
-
-    def _get_connection_manager(self, url, proxy_url=None):
-        if proxy_url:
-            manager = self._get_proxy_manager(proxy_url)
-        else:
-            manager = self._manager
-        return manager
-
-    def _get_request_target(self, url, proxy_url):
-        has_proxy = proxy_url is not None
-
-        if not has_proxy:
-            return self._path_url(url)
-
-        # HTTP proxies expect the request_target to be the absolute url to know
-        # which host to establish a connection to. urllib3 also supports
-        # forwarding for HTTPS through the 'use_forwarding_for_https' parameter.
-        proxy_scheme = urlparse(proxy_url).scheme
-        using_https_forwarding_proxy = (
-            proxy_scheme == 'https'
-            and self._proxies_kwargs().get('use_forwarding_for_https', False)
-        )
-
-        if using_https_forwarding_proxy or url.startswith('http:'):
-            return url
-        else:
-            return self._path_url(url)
-
-    def _chunked(self, headers):
-        transfer_encoding = headers.get('Transfer-Encoding', b'')
-        transfer_encoding = ensure_bytes(transfer_encoding)
-        return transfer_encoding.lower() == b'chunked'
-
-    def close(self):
-        self._manager.clear()
-        for manager in self._proxy_managers.values():
-            manager.clear()
-
-    def send(self, request):
-        try:
-            proxy_url = self._proxy_config.proxy_url_for(request.url)
-            manager = self._get_connection_manager(request.url, proxy_url)
-            conn = manager.connection_from_url(request.url)
-            self._setup_ssl_cert(conn, request.url, self._verify)
-            if ensure_boolean(
-                os.environ.get('BOTO_EXPERIMENTAL__ADD_PROXY_HOST_HEADER', '')
-            ):
-                # This is currently an "experimental" feature which provides
-                # no guarantees of backwards compatibility. It may be subject
-                # to change or removal in any patch version. Anyone opting in
-                # to this feature should strictly pin botocore.
-                host = urlparse(request.url).hostname
-                conn.proxy_headers['host'] = host
-
-            request_target = self._get_request_target(request.url, proxy_url)
-            urllib_response = conn.urlopen(
-                method=request.method,
-                url=request_target,
-                body=request.body,
-                headers=request.headers,
-                retries=Retry(False),
-                assert_same_host=False,
-                preload_content=False,
-                decode_content=False,
-                chunked=self._chunked(request.headers),
-            )
-
-            http_response = botocore.awsrequest.AWSResponse(
-                request.url,
-                urllib_response.status,
-                urllib_response.headers,
-                urllib_response,
-            )
-
-            if not request.stream_output:
-                # Cause the raw stream to be exhausted immediately. We do it
-                # this way instead of using preload_content because
-                # preload_content will never buffer chunked responses
-                http_response.content
-
-            return http_response
-        except URLLib3SSLError as e:
-            raise SSLError(endpoint_url=request.url, error=e)
-        except (NewConnectionError, socket.gaierror) as e:
-            raise EndpointConnectionError(endpoint_url=request.url, error=e)
-        except ProxyError as e:
-            raise ProxyConnectionError(
-                proxy_url=mask_proxy_url(proxy_url), error=e
-            )
-        except URLLib3ConnectTimeoutError as e:
-            raise ConnectTimeoutError(endpoint_url=request.url, error=e)
-        except URLLib3ReadTimeoutError as e:
-            raise ReadTimeoutError(endpoint_url=request.url, error=e)
-        except ProtocolError as e:
-            raise ConnectionClosedError(
-                error=e, request=request, endpoint_url=request.url
-            )
-        except Exception as e:
-            message = 'Exception received when sending urllib3 HTTP request'
-            logger.debug(message, exc_info=True)
-            raise HTTPClientError(error=e)
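A quick sketch of the proxy helpers above (the example proxy URL is hypothetical; assumes botocore is installed):

    from botocore.httpsession import ProxyConfiguration, mask_proxy_url

    print(mask_proxy_url("https://user:[email protected]:8080"))
    # -> https://***:***@proxy.example.com:8080

    config = ProxyConfiguration(proxies={"https": "proxy.example.com:8080"})
    print(config.proxy_url_for("https://s3.amazonaws.com/bucket"))
    # -> http://proxy.example.com:8080  (scheme added by _fix_proxy_url)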
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/network/session.py DELETED
@@ -1,517 +0,0 @@
1
- """PipSession and supporting code, containing all pip-specific
2
- network request configuration and behavior.
3
- """
4
-
5
- import email.utils
6
- import io
7
- import ipaddress
8
- import json
9
- import logging
10
- import mimetypes
11
- import os
12
- import platform
13
- import shutil
14
- import subprocess
15
- import sys
16
- import urllib.parse
17
- import warnings
18
- from typing import (
19
- TYPE_CHECKING,
20
- Any,
21
- Dict,
22
- Generator,
23
- List,
24
- Mapping,
25
- Optional,
26
- Sequence,
27
- Tuple,
28
- Union,
29
- )
30
-
31
- from pip._vendor import requests, urllib3
32
- from pip._vendor.cachecontrol import CacheControlAdapter as _BaseCacheControlAdapter
33
- from pip._vendor.requests.adapters import DEFAULT_POOLBLOCK, BaseAdapter
34
- from pip._vendor.requests.adapters import HTTPAdapter as _BaseHTTPAdapter
35
- from pip._vendor.requests.models import PreparedRequest, Response
36
- from pip._vendor.requests.structures import CaseInsensitiveDict
37
- from pip._vendor.urllib3.connectionpool import ConnectionPool
38
- from pip._vendor.urllib3.exceptions import InsecureRequestWarning
39
-
40
- from pip import __version__
41
- from pip._internal.metadata import get_default_environment
42
- from pip._internal.models.link import Link
43
- from pip._internal.network.auth import MultiDomainBasicAuth
44
- from pip._internal.network.cache import SafeFileCache
45
-
46
- # Import ssl from compat so the initial import occurs in only one place.
47
- from pip._internal.utils.compat import has_tls
48
- from pip._internal.utils.glibc import libc_ver
49
- from pip._internal.utils.misc import build_url_from_netloc, parse_netloc
50
- from pip._internal.utils.urls import url_to_path
51
-
52
- if TYPE_CHECKING:
53
- from ssl import SSLContext
54
-
55
- from pip._vendor.urllib3.poolmanager import PoolManager
56
-
57
-
58
- logger = logging.getLogger(__name__)
59
-
60
- SecureOrigin = Tuple[str, str, Optional[Union[int, str]]]
61
-
62
-
63
- # Ignore warning raised when using --trusted-host.
64
- warnings.filterwarnings("ignore", category=InsecureRequestWarning)
65
-
66
-
67
- SECURE_ORIGINS: List[SecureOrigin] = [
68
- # protocol, hostname, port
69
- # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
70
- ("https", "*", "*"),
71
- ("*", "localhost", "*"),
72
- ("*", "127.0.0.0/8", "*"),
73
- ("*", "::1/128", "*"),
74
- ("file", "*", None),
75
- # ssh is always secure.
76
- ("ssh", "*", "*"),
77
- ]
78
-
79
-
80
- # These are environment variables present when running under various
81
- # CI systems. For each variable, some CI systems that use the variable
82
- # are indicated. The collection was chosen so that for each of a number
83
- # of popular systems, at least one of the environment variables is used.
84
- # This list is used to provide some indication of and lower bound for
85
- # CI traffic to PyPI. Thus, it is okay if the list is not comprehensive.
86
- # For more background, see: https://github.com/pypa/pip/issues/5499
87
- CI_ENVIRONMENT_VARIABLES = (
88
- # Azure Pipelines
89
- "BUILD_BUILDID",
90
- # Jenkins
91
- "BUILD_ID",
92
- # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI
93
- "CI",
94
- # Explicit environment variable.
95
- "PIP_IS_CI",
96
- )
97
-
98
-
99
- def looks_like_ci() -> bool:
100
- """
101
- Return whether it looks like pip is running under CI.
102
- """
103
- # We don't use the method of checking for a tty (e.g. using isatty())
104
- # because some CI systems mimic a tty (e.g. Travis CI). Thus that
105
- # method doesn't provide definitive information in either direction.
106
- return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES)
107
-
108
-
109
- def user_agent() -> str:
110
- """
111
- Return a string representing the user agent.
112
- """
113
- data: Dict[str, Any] = {
114
- "installer": {"name": "pip", "version": __version__},
115
- "python": platform.python_version(),
116
- "implementation": {
117
- "name": platform.python_implementation(),
118
- },
119
- }
120
-
121
- if data["implementation"]["name"] == "CPython":
122
- data["implementation"]["version"] = platform.python_version()
123
- elif data["implementation"]["name"] == "PyPy":
124
- pypy_version_info = sys.pypy_version_info # type: ignore
125
- if pypy_version_info.releaselevel == "final":
126
- pypy_version_info = pypy_version_info[:3]
127
- data["implementation"]["version"] = ".".join(
128
- [str(x) for x in pypy_version_info]
129
- )
130
- elif data["implementation"]["name"] == "Jython":
131
- # Complete Guess
132
- data["implementation"]["version"] = platform.python_version()
133
- elif data["implementation"]["name"] == "IronPython":
134
- # Complete Guess
135
- data["implementation"]["version"] = platform.python_version()
136
-
137
- if sys.platform.startswith("linux"):
138
- from pip._vendor import distro
139
-
140
- linux_distribution = distro.name(), distro.version(), distro.codename()
141
- distro_infos: Dict[str, Any] = dict(
142
- filter(
143
- lambda x: x[1],
144
- zip(["name", "version", "id"], linux_distribution),
145
- )
146
- )
147
- libc = dict(
148
- filter(
149
- lambda x: x[1],
150
- zip(["lib", "version"], libc_ver()),
151
- )
152
- )
153
- if libc:
154
- distro_infos["libc"] = libc
155
- if distro_infos:
156
- data["distro"] = distro_infos
157
-
158
- if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
159
- data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}
160
-
161
- if platform.system():
162
- data.setdefault("system", {})["name"] = platform.system()
163
-
164
- if platform.release():
165
- data.setdefault("system", {})["release"] = platform.release()
166
-
167
- if platform.machine():
168
- data["cpu"] = platform.machine()
169
-
170
- if has_tls():
171
- import _ssl as ssl
172
-
173
- data["openssl_version"] = ssl.OPENSSL_VERSION
174
-
175
- setuptools_dist = get_default_environment().get_distribution("setuptools")
176
- if setuptools_dist is not None:
177
- data["setuptools_version"] = str(setuptools_dist.version)
178
-
179
- if shutil.which("rustc") is not None:
180
- # If for any reason `rustc --version` fails, silently ignore it
181
- try:
182
- rustc_output = subprocess.check_output(
183
- ["rustc", "--version"], stderr=subprocess.STDOUT, timeout=0.5
184
- )
185
- except Exception:
186
- pass
187
- else:
188
- if rustc_output.startswith(b"rustc "):
189
- # The format of `rustc --version` is:
190
- # `b'rustc 1.52.1 (9bc8c42bb 2021-05-09)\n'`
191
- # We extract just the middle (1.52.1) part
192
- data["rustc_version"] = rustc_output.split(b" ")[1].decode()
193
-
194
- # Use None rather than False so as not to give the impression that
195
- # pip knows it is not being run under CI. Rather, it is a null or
196
- # inconclusive result. Also, we include some value rather than no
197
- # value to make it easier to know that the check has been run.
198
- data["ci"] = True if looks_like_ci() else None
199
-
200
- user_data = os.environ.get("PIP_USER_AGENT_USER_DATA")
201
- if user_data is not None:
202
- data["user_data"] = user_data
203
-
204
- return "{data[installer][name]}/{data[installer][version]} {json}".format(
205
- data=data,
206
- json=json.dumps(data, separators=(",", ":"), sort_keys=True),
207
- )
208
-
209
-
210
- class LocalFSAdapter(BaseAdapter):
211
- def send(
212
- self,
213
- request: PreparedRequest,
214
- stream: bool = False,
215
- timeout: Optional[Union[float, Tuple[float, float]]] = None,
216
- verify: Union[bool, str] = True,
217
- cert: Optional[Union[str, Tuple[str, str]]] = None,
218
- proxies: Optional[Mapping[str, str]] = None,
219
- ) -> Response:
220
- pathname = url_to_path(request.url)
221
-
222
- resp = Response()
223
- resp.status_code = 200
224
- resp.url = request.url
225
-
226
- try:
227
- stats = os.stat(pathname)
228
- except OSError as exc:
229
- # format the exception raised as a io.BytesIO object,
230
- # to return a better error message:
231
- resp.status_code = 404
232
- resp.reason = type(exc).__name__
233
- resp.raw = io.BytesIO(f"{resp.reason}: {exc}".encode("utf8"))
234
- else:
235
- modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
236
- content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
237
- resp.headers = CaseInsensitiveDict(
238
- {
239
- "Content-Type": content_type,
240
- "Content-Length": stats.st_size,
241
- "Last-Modified": modified,
242
- }
243
- )
244
-
245
- resp.raw = open(pathname, "rb")
246
- resp.close = resp.raw.close
247
-
248
- return resp
249
-
250
- def close(self) -> None:
251
- pass
252
-
253
-
254
- class _SSLContextAdapterMixin:
255
- """Mixin to add the ``ssl_context`` constructor argument to HTTP adapters.
256
-
257
- The additional argument is forwarded directly to the pool manager. This allows us
258
- to dynamically decide what SSL store to use at runtime, which is used to implement
259
- the optional ``truststore`` backend.
260
- """
261
-
262
- def __init__(
263
- self,
264
- *,
265
- ssl_context: Optional["SSLContext"] = None,
266
- **kwargs: Any,
267
- ) -> None:
268
- self._ssl_context = ssl_context
269
- super().__init__(**kwargs)
270
-
271
- def init_poolmanager(
272
- self,
273
- connections: int,
274
- maxsize: int,
275
- block: bool = DEFAULT_POOLBLOCK,
276
- **pool_kwargs: Any,
277
- ) -> "PoolManager":
278
- if self._ssl_context is not None:
279
- pool_kwargs.setdefault("ssl_context", self._ssl_context)
280
- return super().init_poolmanager( # type: ignore[misc]
281
- connections=connections,
282
- maxsize=maxsize,
283
- block=block,
284
- **pool_kwargs,
285
- )
286
-
287
-
- class HTTPAdapter(_SSLContextAdapterMixin, _BaseHTTPAdapter):
-     pass
-
-
- class CacheControlAdapter(_SSLContextAdapterMixin, _BaseCacheControlAdapter):
-     pass
-
-
- class InsecureHTTPAdapter(HTTPAdapter):
-     def cert_verify(
-         self,
-         conn: ConnectionPool,
-         url: str,
-         verify: Union[bool, str],
-         cert: Optional[Union[str, Tuple[str, str]]],
-     ) -> None:
-         super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
-
-
- class InsecureCacheControlAdapter(CacheControlAdapter):
-     def cert_verify(
-         self,
-         conn: ConnectionPool,
-         url: str,
-         verify: Union[bool, str],
-         cert: Optional[Union[str, Tuple[str, str]]],
-     ) -> None:
-         super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
-
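Both insecure variants pin verify=False no matter what the caller asked for. A quick illustration of the effect with a hypothetical session (not part of the diff):

    s = requests.Session()
    s.mount("https://internal.example/", InsecureHTTPAdapter(max_retries=0))
    # Even an explicit s.get("https://internal.example/pkg", verify=True) skips
    # certificate verification: cert_verify() rewrites verify to False before
    # urllib3 ever sees the request.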
- class PipSession(requests.Session):
-     timeout: Optional[int] = None
-
-     def __init__(
-         self,
-         *args: Any,
-         retries: int = 0,
-         cache: Optional[str] = None,
-         trusted_hosts: Sequence[str] = (),
-         index_urls: Optional[List[str]] = None,
-         ssl_context: Optional["SSLContext"] = None,
-         **kwargs: Any,
-     ) -> None:
-         """
-         :param trusted_hosts: Domains not to emit warnings for when not using
-             HTTPS.
-         """
-         super().__init__(*args, **kwargs)
-
-         # Namespace the attribute with "pip_" just in case to prevent
-         # possible conflicts with the base class.
-         self.pip_trusted_origins: List[Tuple[str, Optional[int]]] = []
-
-         # Attach our User Agent to the request
-         self.headers["User-Agent"] = user_agent()
-
-         # Attach our Authentication handler to the session
-         self.auth = MultiDomainBasicAuth(index_urls=index_urls)
-
-         # Create our urllib3.Retry instance, which allows us to customize
-         # how we handle retries.
-         retries = urllib3.Retry(
-             # Set the total number of retries that a particular request can
-             # have.
-             total=retries,
-             # A 503 error from PyPI typically means that the Fastly -> Origin
-             # connection got interrupted in some way. A 503 error in general
-             # is typically considered a transient error, so we'll go ahead
-             # and retry it.
-             # A 500 may indicate a transient error in Amazon S3.
-             # A 520 or 527 may indicate a transient error in Cloudflare.
-             status_forcelist=[500, 503, 520, 527],
-             # Add a small amount of back off between failed requests in
-             # order to prevent hammering the service.
-             backoff_factor=0.25,
-         )  # type: ignore
-
-         # Our Insecure HTTPAdapter disables HTTPS validation. It does not
-         # support caching, so we'll use it for all http:// URLs.
-         # If caching is disabled, we will also use it for https:// hosts
-         # that we've marked as trusted, i.e. hosts whose TLS errors we
-         # ignore (trusted-hosts).
-         insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
-
-         # We want to _only_ cache responses on securely fetched origins, or
-         # when the host is specified as trusted. We do this because we can't
-         # validate the response of an insecurely/untrusted fetched origin,
-         # and we don't want someone to be able to poison the cache and
-         # require manual eviction from the cache to fix it.
-         if cache:
-             secure_adapter = CacheControlAdapter(
-                 cache=SafeFileCache(cache),
-                 max_retries=retries,
-                 ssl_context=ssl_context,
-             )
-             self._trusted_host_adapter = InsecureCacheControlAdapter(
-                 cache=SafeFileCache(cache),
-                 max_retries=retries,
-             )
-         else:
-             secure_adapter = HTTPAdapter(max_retries=retries, ssl_context=ssl_context)
-             self._trusted_host_adapter = insecure_adapter
-
-         self.mount("https://", secure_adapter)
-         self.mount("http://", insecure_adapter)
-
-         # Enable file:// urls
-         self.mount("file://", LocalFSAdapter())
-
-         for host in trusted_hosts:
-             self.add_trusted_host(host, suppress_logging=True)
-
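A hedged construction sketch tying the constructor arguments together (internal API; every argument value here is illustrative):

    session = PipSession(
        retries=3,                                  # becomes urllib3.Retry(total=3, ...)
        cache="/tmp/pip-http-cache",                # enables the CacheControl adapters
        trusted_hosts=["internal.example:8080"],    # mounted on the trusted-host adapter
        index_urls=["https://pypi.org/simple"],     # fed to MultiDomainBasicAuth
    )
    resp = session.get("https://pypi.org/simple/requests/")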
-     def update_index_urls(self, new_index_urls: List[str]) -> None:
-         """
-         :param new_index_urls: New index urls to update the authentication
-             handler with.
-         """
-         self.auth.index_urls = new_index_urls
-
-     def add_trusted_host(
-         self, host: str, source: Optional[str] = None, suppress_logging: bool = False
-     ) -> None:
-         """
-         :param host: It is okay to provide a host that has previously been
-             added.
-         :param source: An optional source string, for logging where the host
-             string came from.
-         """
-         if not suppress_logging:
-             msg = f"adding trusted host: {host!r}"
-             if source is not None:
-                 msg += f" (from {source})"
-             logger.info(msg)
-
-         host_port = parse_netloc(host)
-         if host_port not in self.pip_trusted_origins:
-             self.pip_trusted_origins.append(host_port)
-
-         self.mount(
-             build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter
-         )
-         self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter)
-         if not host_port[1]:
-             self.mount(
-                 build_url_from_netloc(host, scheme="http") + ":",
-                 self._trusted_host_adapter,
-             )
-             # Mount wildcard ports for the same host.
-             self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter)
-
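For a portless trusted host, four URL prefixes end up routed through the trusted-host adapter, covering both schemes and any explicit port. An illustration for a hypothetical host "internal.example":

    session = PipSession(trusted_hosts=["internal.example"])
    # Prefixes now mapped to the trusted-host adapter:
    #   http://internal.example/    https://internal.example/
    #   http://internal.example:    https://internal.example:   (wildcard ports)
    # Had the host been given as "internal.example:8080", only the two "/"
    # forms would be mounted, pinned to that port.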
-     def iter_secure_origins(self) -> Generator[SecureOrigin, None, None]:
-         yield from SECURE_ORIGINS
-         for host, port in self.pip_trusted_origins:
-             yield ("*", host, "*" if port is None else port)
-
-     def is_secure_origin(self, location: Link) -> bool:
-         # Determine if this url used a secure transport mechanism
-         parsed = urllib.parse.urlparse(str(location))
-         origin_protocol, origin_host, origin_port = (
-             parsed.scheme,
-             parsed.hostname,
-             parsed.port,
-         )
-
-         # Strip the repository type from the protocol before matching: in
-         # cases such as "git+ssh", only use "ssh" (i.e. only verify against
-         # the last scheme).
-         origin_protocol = origin_protocol.rsplit("+", 1)[-1]
-
-         # Determine if our origin is a secure origin by looking through our
-         # hardcoded list of secure origins, as well as any additional ones
-         # configured on this PackageFinder instance.
-         for secure_origin in self.iter_secure_origins():
-             secure_protocol, secure_host, secure_port = secure_origin
-             if origin_protocol != secure_protocol and secure_protocol != "*":
-                 continue
-
-             try:
-                 addr = ipaddress.ip_address(origin_host or "")
-                 network = ipaddress.ip_network(secure_host)
-             except ValueError:
-                 # We don't have both a valid address and a valid network, so
-                 # we'll check this origin against hostnames.
-                 if (
-                     origin_host
-                     and origin_host.lower() != secure_host.lower()
-                     and secure_host != "*"
-                 ):
-                     continue
-             else:
-                 # We have a valid address and network, so see if the address
-                 # is contained within the network.
-                 if addr not in network:
-                     continue
-
-             # Check to see if the port matches.
-             if (
-                 origin_port != secure_port
-                 and secure_port != "*"
-                 and secure_port is not None
-             ):
-                 continue
-
-             # If we've gotten here, then this origin matches the current
-             # secure origin, and we should return True.
-             return True
-
-         # If we've gotten to this point, then the origin isn't secure, and we
-         # will not accept it as a valid location to search. We will, however,
-         # log a warning that we are ignoring it.
-         logger.warning(
-             "The repository located at %s is not a trusted or secure host and "
-             "is being ignored. If this repository is available via HTTPS we "
-             "recommend you use HTTPS instead, otherwise you may silence "
-             "this warning and allow it anyway with '--trusted-host %s'.",
-             origin_host,
-             origin_host,
-         )
-
-         return False
-
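A hedged illustration of the matching rules (Link is pip-internal; the URLs and import are assumptions made for the example):

    from pip._internal.models.link import Link  # internal API

    session = PipSession(trusted_hosts=["internal.example"])
    session.is_secure_origin(Link("https://pypi.org/simple/"))         # True: https always matches
    session.is_secure_origin(Link("http://internal.example/simple/"))  # True: ("*", host, "*") origin
    session.is_secure_origin(Link("http://pypi.org/simple/"))          # False, and logs the warning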
-     def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response:
-         # Allow setting a default timeout on a session
-         kwargs.setdefault("timeout", self.timeout)
-         # Allow setting default proxies on a session
-         kwargs.setdefault("proxies", self.proxies)
-
-         # Dispatch the actual request
-         return super().request(method, url, *args, **kwargs)
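The override means per-session defaults apply unless a call supplies its own values; for example (illustrative):

    session.timeout = 15           # seconds; used whenever a call omits timeout=
    session.get(url)               # inherits timeout=15 and session.proxies
    session.get(url, timeout=60)   # an explicit per-call value still wins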
spaces/Boilin/URetinex-Net/network/architecture.py DELETED
@@ -1,41 +0,0 @@
- import torch
- import torch.nn as nn
- import torchvision  # note: imported but unused in this module
-
- def get_batchnorm_layer(opts):
-     if opts.norm_layer == "batch":
-         norm_layer = nn.BatchNorm2d
-     elif opts.norm_layer == "spectral_instance":  # fixed: original checked opts.layer
-         norm_layer = nn.InstanceNorm2d
-     else:
-         # fixed: original printed "not implemented" and called exit()
-         raise NotImplementedError(f"norm layer {opts.norm_layer!r} not implemented")
-     return norm_layer
-
- def get_conv2d_layer(in_c, out_c, k, s, p=0, dilation=1, groups=1):
-     return nn.Conv2d(in_channels=in_c,
-                      out_channels=out_c,
-                      kernel_size=k,
-                      stride=s,
-                      padding=p, dilation=dilation, groups=groups)
-
- def get_deconv2d_layer(in_c, out_c, k=1, s=1, p=1):
-     # Upsample-then-convolve "deconvolution": bilinear upsampling followed
-     # by a plain convolution instead of ConvTranspose2d.
-     return nn.Sequential(
-         nn.Upsample(scale_factor=2, mode="bilinear"),
-         nn.Conv2d(
-             in_channels=in_c,
-             out_channels=out_c,
-             kernel_size=k,
-             stride=s,
-             padding=p
-         )
-     )
-
- class Identity(nn.Module):
-
-     def __init__(self):
-         super(Identity, self).__init__()
-
-     def forward(self, x):
-         return x
-
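A quick sanity-check sketch of the helpers' output shapes (the shapes are chosen arbitrarily for illustration):

    import torch

    conv = get_conv2d_layer(in_c=3, out_c=16, k=3, s=1, p=1)
    deconv = get_deconv2d_layer(in_c=16, out_c=3, k=3, s=1, p=1)

    x = torch.randn(1, 3, 64, 64)
    h = conv(x)      # -> (1, 16, 64, 64): padding 1 preserves spatial size
    y = deconv(h)    # -> (1, 3, 128, 128): bilinear upsample doubles H and W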
spaces/CAMP-ViL/Xplainer/model.py DELETED
@@ -1,158 +0,0 @@
- from pathlib import Path
- from typing import List
-
- import torch
- import torch.nn.functional as F
- from health_multimodal.image import get_biovil_resnet_inference
- from health_multimodal.text import get_cxr_bert_inference
- from health_multimodal.vlp import ImageTextInferenceEngine
-
- from utils import cos_sim_to_prob, prob_to_log_prob, log_prob_to_prob
-
-
- class InferenceModel():
-     def __init__(self):
-         self.text_inference = get_cxr_bert_inference()
-         self.image_inference = get_biovil_resnet_inference()
-         self.image_text_inference = ImageTextInferenceEngine(
-             image_inference_engine=self.image_inference,
-             text_inference_engine=self.text_inference,
-         )
-         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-         self.image_text_inference.to(self.device)
-
-         # caches for faster inference
-         self.text_embedding_cache = {}
-         self.image_embedding_cache = {}
-
-         self.transform = self.image_inference.transform
-
-     def get_similarity_score_from_raw_data(self, image_embedding, query_text: str) -> float:
-         """Compute the cosine similarity score between an image and one or more strings.
-         If multiple strings are passed, their embeddings are averaged before L2-normalization.
-         :param image_embedding: Precomputed, L2-normalized global embedding of the chest X-ray.
-         :param query_text: Input radiology text phrase.
-         :return: The similarity score between the image and the text.
-         """
-         assert not self.image_text_inference.image_inference_engine.model.training
-         assert not self.image_text_inference.text_inference_engine.model.training
-         if query_text in self.text_embedding_cache:
-             text_embedding = self.text_embedding_cache[query_text]
-         else:
-             text_embedding = self.image_text_inference.text_inference_engine.get_embeddings_from_prompt([query_text], normalize=False)
-             text_embedding = text_embedding.mean(dim=0)
-             text_embedding = F.normalize(text_embedding, dim=0, p=2)
-             self.text_embedding_cache[query_text] = text_embedding
-
-         cos_similarity = image_embedding @ text_embedding.t()
-
-         return cos_similarity.item()
-
-     def process_image(self, image):
-         '''Same code as in image_text_inference.image_inference_engine.get_projected_global_embedding(),
-         but adapted to deal with image instances instead of paths.'''
-
-         transformed_image = self.transform(image)
-         projected_img_emb = self.image_inference.model.forward(transformed_image).projected_global_embedding
-         projected_img_emb = F.normalize(projected_img_emb, dim=-1)
-         assert projected_img_emb.shape[0] == 1
-         assert projected_img_emb.ndim == 2
-         return projected_img_emb[0]
-
-     def get_descriptor_probs(self, image_path: Path, descriptors: List[str], do_negative_prompting=True, demo=False):
-         probs = {}
-         negative_probs = {}
-         if image_path in self.image_embedding_cache:
-             image_embedding = self.image_embedding_cache[image_path]
-         else:
-             image_embedding = self.image_text_inference.image_inference_engine.get_projected_global_embedding(image_path)
-             if not demo:
-                 self.image_embedding_cache[image_path] = image_embedding
-
-         # The default get_similarity_score_from_raw_data would load the image
-         # every time; instead, we load it only once.
-         for desc in descriptors:
-             prompt = f'There are {desc}'
-             score = self.get_similarity_score_from_raw_data(image_embedding, prompt)
-             if do_negative_prompting:
-                 neg_prompt = f'There are no {desc}'
-                 neg_score = self.get_similarity_score_from_raw_data(image_embedding, neg_prompt)
-
-             pos_prob = cos_sim_to_prob(score)
-
-             if do_negative_prompting:
-                 pos_prob, neg_prob = torch.softmax((torch.tensor([score, neg_score]) / 0.5), dim=0)
-                 negative_probs[desc] = neg_prob
-
-             probs[desc] = pos_prob
-
-         return probs, negative_probs
-
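The negative-prompting branch turns two raw cosine similarities into a complementary pair of probabilities via a temperature-scaled softmax (temperature 0.5). A standalone sketch of just that step, with made-up scores:

    import torch

    score, neg_score = 0.32, 0.18   # cosine similarities (illustrative values)
    temperature = 0.5
    pos_prob, neg_prob = torch.softmax(
        torch.tensor([score, neg_score]) / temperature, dim=0
    )
    print(float(pos_prob), float(neg_prob))  # ~0.57 vs ~0.43; sums to 1 by construction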
-     def get_all_descriptors(self, disease_descriptors):
-         all_descriptors = set()
-         for disease, descs in disease_descriptors.items():
-             all_descriptors.update([f"{desc} indicating {disease}" for desc in descs])
-         all_descriptors = sorted(all_descriptors)
-         return all_descriptors
-
-     def get_all_descriptors_only_disease(self, disease_descriptors):
-         all_descriptors = set()
-         for disease, descs in disease_descriptors.items():
-             all_descriptors.update([f"{desc}" for desc in descs])
-         all_descriptors = sorted(all_descriptors)
-         return all_descriptors
-
-     def get_diseases_probs(self, disease_descriptors, pos_probs, negative_probs, prior_probs=None, do_negative_prompting=True):
-         disease_probs = {}
-         disease_neg_probs = {}
-         for disease, descriptors in disease_descriptors.items():
-             desc_log_probs = []
-             desc_neg_log_probs = []
-             for desc in descriptors:
-                 desc = f"{desc} indicating {disease}"
-                 desc_log_probs.append(prob_to_log_prob(pos_probs[desc]))
-                 if do_negative_prompting:
-                     desc_neg_log_probs.append(prob_to_log_prob(negative_probs[desc]))
-             # Average the descriptor log-probs. (Note: the sorted() call does
-             # not affect the sum; it looks like a leftover from a top-k variant.)
-             disease_log_prob = sum(sorted(desc_log_probs, reverse=True)) / len(desc_log_probs)
-             if do_negative_prompting:
-                 disease_neg_log_prob = sum(desc_neg_log_probs) / len(desc_neg_log_probs)
-             disease_probs[disease] = log_prob_to_prob(disease_log_prob)
-             if do_negative_prompting:
-                 disease_neg_probs[disease] = log_prob_to_prob(disease_neg_log_prob)
-
-         return disease_probs, disease_neg_probs
-
-     # Threshold Based
-     def get_predictions(self, disease_descriptors, threshold, disease_probs, keys):
-         predicted_diseases = []
-         prob_vector = torch.zeros(len(keys), dtype=torch.float)  # num of diseases
-         for disease in disease_descriptors:
-             if disease == 'No Finding':
-                 continue
-             prob_vector[keys.index(disease)] = disease_probs[disease]
-             if disease_probs[disease] > threshold:
-                 predicted_diseases.append(disease)
-
-         # "No Finding" rule: its probability is the complement of the most
-         # likely finding. (The original if/else had identical branches, so it
-         # is collapsed to a single assignment here.)
-         prob_vector[0] = 1.0 - max(prob_vector)
-
-         return predicted_diseases, prob_vector
-
-     # Negative vs Positive Prompting
-     def get_predictions_bin_prompting(self, disease_descriptors, disease_probs, negative_disease_probs, keys):
-         predicted_diseases = []
-         prob_vector = torch.zeros(len(keys), dtype=torch.float)  # num of diseases
-         for disease in disease_descriptors:
-             if disease == 'No Finding':
-                 continue
-             pos_neg_scores = torch.tensor([disease_probs[disease], negative_disease_probs[disease]])
-             prob_vector[keys.index(disease)] = pos_neg_scores[0]
-             if torch.argmax(pos_neg_scores) == 0:  # positive is more likely
-                 predicted_diseases.append(disease)
-
-         # Same "No Finding" complement rule as above (identical branches collapsed).
-         prob_vector[0] = 1.0 - max(prob_vector)
-
-         return predicted_diseases, prob_vector
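Putting the pieces together, the threshold-based path runs descriptor scoring, disease aggregation, and prediction in sequence. A hedged end-to-end sketch (the descriptor names, image path, and the 0.5 threshold are illustrative, not from the diff):

    from pathlib import Path

    model = InferenceModel()
    descriptors = {"Pneumonia": ["airspace consolidation", "air bronchograms"]}
    keys = ["No Finding", "Pneumonia"]  # index 0 is reserved for "No Finding"

    pos, neg = model.get_descriptor_probs(
        Path("cxr.jpg"), model.get_all_descriptors(descriptors)
    )
    disease_probs, disease_neg_probs = model.get_diseases_probs(descriptors, pos, neg)
    predicted, probs = model.get_predictions(descriptors, 0.5, disease_probs, keys)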