Commit 360a4ad
Parent(s): a638954
Update parquet files (step 96 of 121)
This view is limited to 50 files because it contains too many changes.
- spaces/17TheWord/RealESRGAN/realesrgan/weights/README.md +0 -3
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Durood E Muqaddas Pdf Learn the Meaning and Significance of the Supreme Salawat.md +0 -156
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fix Download Nokia X71 Stock Wallpapers 11 Wallpapers (2160 X 2310).md +0 -23
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ghost] KKD Windows 7 V.3 2012 32Bit How to Install and Enjoy this Amazing OS.md +0 -92
- spaces/1gistliPinn/ChatGPT4/Examples/Adobe Illustrator CS4 (Multilingual - Windows) - Crack LINK Only [RH] Full Version.md +0 -37
- spaces/1gistliPinn/ChatGPT4/Examples/EBP.Devis.et.Facturation.2010 Avc VERIFIED Crack.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/EmailHackerv346activationcod.md +0 -12
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Become a Famous Parkour Streamer with Rysen Dawn APK from Uptodown.md +0 -156
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cmo usar SnapTube para Android Descarga vdeos de YouTube Instagram y ms con un solo clic.md +0 -112
- spaces/1phancelerku/anime-remove-background/DirectX 12 for Windows 7 64 bit The Ultimate Gaming Experience.md +0 -135
- spaces/1toTree/lora_test/ppdiffusers/schedulers/preconfig/__init__.py +0 -38
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/vocoder/bigvgan/activations.py +0 -120
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/pretrained.py +0 -147
- spaces/AIGC-Audio/Make_An_Audio_inpaint/app.py +0 -170
- spaces/Ababababababbababa/SD-2.1-Img2Img/README.md +0 -14
- spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/login/$types.d.ts +0 -28
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/GptGo.py +0 -79
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/bbcodetext/Factory.d.ts +0 -7
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/methods/listpanel/ToggleListPanel.js +0 -10
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/Factory.js +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/OnTouchTrack.js +0 -33
- spaces/AhmedSSoliman/MarianCG-CoNaLa/README.md +0 -37
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/__init__.py +0 -20
- spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/bias_act.py +0 -226
- spaces/Amrrs/DragGan-Inversion/torch_utils/ops/bias_act.cpp +0 -99
- spaces/Amrrs/DragGan-Inversion/torch_utils/ops/fma.py +0 -64
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +0 -537
- spaces/Andy1621/uniformer_image_detection/mmdet/models/utils/gaussian_target.py +0 -185
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/pascal_context.py +0 -103
- spaces/Arnx/MusicGenXvAKN/audiocraft/utils/notebook.py +0 -32
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/context.py +0 -213
- spaces/Audio-AGI/AudioSep/models/resunet.py +0 -715
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/config/test_yacs_config.py +0 -270
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/structures/test_rotated_boxes.py +0 -437
- spaces/Ayakasuki/anime-ai-detect/app.py +0 -17
- spaces/Bambicita/rvc-models/infer_pack/commons.py +0 -166
- spaces/Bart92/RVC_HF/tools/infer/train-index.py +0 -42
- spaces/Benson/text-generation/Examples/Azul 39s Pistas Joe 39s 3d Scavenger Hunt Descargar.md +0 -45
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/metadata_legacy.py +0 -74
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/msgpack/fallback.py +0 -1010
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/_structures.py +0 -61
- spaces/Boilin/URetinex-Net/utils.py +0 -113
- spaces/CVPR/LIVE/pybind11/include/pybind11/functional.h +0 -101
- spaces/CVPR/LIVE/pybind11/tests/test_constants_and_functions.cpp +0 -127
- spaces/CVPR/Text2Human/Text2Human/ui_demo.py +0 -285
- spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/clip_swin.py +0 -289
- spaces/ChevyWithAI/rvc-aicover/infer_pack/transforms.py +0 -209
- spaces/Chitranshu/Dashboard-Uber/README.md +0 -10
- spaces/CikeyQI/meme-api/meme_generator/memes/my_wife/__init__.py +0 -53
- spaces/CofAI/chat.b4/g4f/Provider/Providers/GetGpt.py +0 -57
spaces/17TheWord/RealESRGAN/realesrgan/weights/README.md
DELETED
@@ -1,3 +0,0 @@
-# Weights
-
-Put the downloaded weights to this folder.
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Durood E Muqaddas Pdf Learn the Meaning and Significance of the Supreme Salawat.md
DELETED
@@ -1,156 +0,0 @@
-<br />
-<h1>Download Durood E Muqaddas PDF</h1>
-<p>Durood E Muqaddas is one of the many beautiful and powerful salutations that Muslims send upon the Prophet Muhammad (peace be upon him). It is a special and unique way of expressing love, respect, and gratitude to the beloved Messenger of Allah (peace be upon him). In this article, we will explore what Durood E Muqaddas is, how to recite it, and how to download it in PDF format.</p>
-<h2>Download Durood E Muqaddas Pdf</h2><br /><p><b><b>Download Zip</b> ✓✓✓ <a href="https://byltly.com/2uKwxz">https://byltly.com/2uKwxz</a></b></p><br /><br />
-<h2>What is Durood E Muqaddas?</h2>
-<p>Durood E Muqaddas is a term that means "the most sacred salutation". It is also known as Salawat E Muqaddas or Darood Muqadas. It is a specific form of invocation that praises and invokes blessings upon the Prophet Muhammad (peace be upon him) in a very comprehensive and sublime manner.</p>
-<h3>The meaning and significance of Durood E Muqaddas</h3>
-<p>The Arabic text of Durood E Muqaddas is as follows:</p>
-<p><code>Bismillah-hirRa'hmaan nirRa'heem. Yaa Illahi bi-'hurmati aqwali Mu'hammadin wa-afa-'ali Mu'hammadin wa-aqwaali Mu'hammadin wa-as-haabi Mu'hammadin sallALLAHU Alaihi Wasallam. Yaa Illahi bi-'hurmati badni Mu'hammadin wa-batni Mu'hammadin wa-barkati</code></p>
-<p>The English translation of Durood E Muqaddas is as follows:</p>
-<p><code>In the name of Allah, the Most Gracious, the Most Merciful. O Allah, by the sanctity of the words of Muhammad, and the deeds of Muhammad, and the sayings of Muhammad, and the companions of Muhammad, peace be upon him. O Allah, by the sanctity of the body of Muhammad, and the inner self of Muhammad, and the blessings</code></p>
-<p>The meaning and significance of Durood E Muqaddas can be understood from several aspects:</p>
-<ul>
-<li>It begins with the basmala, which is a declaration of faith in Allah's name, mercy, and sovereignty.</li>
-<li>It addresses Allah as "Yaa Illahi", which is a very intimate and respectful way of calling upon Him.</li>
-<li>It invokes Allah's blessings upon the Prophet Muhammad (peace be upon him) by mentioning his various attributes and qualities, such as his words, deeds, sayings, companions, body, inner self, and blessings.</li>
-<li>It uses the word "bi-'hurmati", which means "by the sanctity" or "by the honor" of these attributes and qualities. This implies that the reciter acknowledges the high status and dignity of the Prophet Muhammad (peace be upon him) before Allah.</li>
-<li>It ends with "sallALLAHU Alaihi Wasallam", which means "may Allah send blessings and peace upon him". This is a mandatory phrase that must be uttered whenever the name of the Prophet Muhammad (peace be upon him) is mentioned.</li>
-</ul>
-<p>The significance of Durood E Muqaddas is that it expresses a sincere and profound love for the Prophet Muhammad (peace be upon him), who is the best of creation, the leader of all prophets, the mercy to all worlds, and the intercessor for all believers. It also seeks Allah's favor and mercy by invoking His name and appealing to His generosity.</p>
-<h3>The origin and history of Durood E Muqaddas</h3>
-<p>The origin and history of Durood E Muqaddas are not very clear or well-documented. However, some sources suggest that it was revealed by Allah Himself to one of His pious servants named Sheikh Abu Bakr Shibli (may Allah have mercy on him), who was a famous Sufi saint from Baghdad in the 10th century CE. He was known for his devotion and love for the Prophet Muhammad (peace be upon him) and his adherence to the Quran and Sunnah.</p>
-<p>According to some narrations, Sheikh Abu Bakr Shibli (may Allah have mercy on him) used to recite this Durood every night before sleeping. One night, he saw a vision in which he was standing before Allah's throne. He heard a voice saying: "O Abu Bakr Shibli! You have pleased Me with your recitation of this Durood. Ask Me whatever you wish." He said: "O Allah! I ask You for nothing but Your pleasure." The voice said: "You have asked Me for something that I love. I have granted you My pleasure. And I have also granted you whatever you did not ask Me for."</p>
-<p>How to download Durood E Muqaddas Pdf for free<br />
-Benefits of reciting Durood E Muqaddas Pdf daily<br />
-Durood E Muqaddas Pdf in Arabic and English translation<br />
-Best sites to download Durood E Muqaddas Pdf online<br />
-Durood E Muqaddas Pdf with Urdu translation and explanation<br />
-Download Durood E Muqaddas Pdf in different formats<br />
-Durood E Muqaddas Pdf audio and video download<br />
-Durood E Muqaddas Pdf meaning and virtues<br />
-Download Durood E Muqaddas Pdf with beautiful calligraphy<br />
-Durood E Muqaddas Pdf in Hindi and Roman Urdu<br />
-Download Durood E Muqaddas Pdf with transliteration and commentary<br />
-Durood E Muqaddas Pdf book download and review<br />
-Durood E Muqaddas Pdf wallpaper and poster download<br />
-Durood E Muqaddas Pdf in other languages and dialects<br />
-Download Durood E Muqaddas Pdf with images and graphics<br />
-Durood E Muqaddas Pdf history and origin<br />
-Download Durood E Muqaddas Pdf with references and sources<br />
-Durood E Muqaddas Pdf in different fonts and styles<br />
-Download Durood E Muqaddas Pdf with annotations and notes<br />
-Durood E Muqaddas Pdf comparison and contrast with other duroods<br />
-Download Durood E Muqaddas Pdf with examples and applications<br />
-Durood E Muqaddas Pdf in different sects and schools of thought<br />
-Download Durood E Muqaddas Pdf with tips and tricks<br />
-Durood E Muqaddas Pdf stories and anecdotes<br />
-Download Durood E Muqaddas Pdf with questions and answers<br />
-Durood E Muqaddas Pdf in different occasions and times<br />
-Download Durood E Muqaddas Pdf with proofs and evidences<br />
-Durood E Muqaddas Pdf in different genres and categories<br />
-Download Durood E Muqaddas Pdf with summaries and outlines<br />
-Durood E Muqaddas Pdf in different modes and moods<br />
-Download Durood E Muqaddas Pdf with quizzes and tests<br />
-Durood E Muqaddas Pdf in different perspectives and viewpoints<br />
-Download Durood E Muqaddas Pdf with suggestions and recommendations<br />
-Durood E Muqaddas Pdf in different cultures and traditions<br />
-Download Durood E Muqaddas Pdf with feedback and reviews<br />
-Durood E Muqaddas Pdf in different media and platforms<br />
-Download Durood E Muqaddas Pdf with alternatives and variations<br />
-Durood E Muqaddas Pdf in different tones and voices<br />
-Download Durood E Muqaddas Pdf with challenges and solutions<br />
-Durood E Muqaddas Pdf in different formats and layouts</p>
-<p>Another narration states that Sheikh Abu Bakr Shibli (may Allah have mercy on him) used to recite this Durood every Friday after Jumu'ah prayer. One Friday, he saw a vision in which he was standing before the Prophet Muhammad (peace be upon him). He said: "O Messenger of Allah! I have been reciting this Durood in your honor every Friday. Is it acceptable to you?" The Prophet (peace be upon him) said: "O Abu Bakr Shibli! You have pleased me with your recitation of this Durood. Ask me whatever you wish." He said: "O Messenger of Allah! I ask you for nothing but your intercession on the Day of Judgment." The Prophet (peace be upon him) said: "You have asked me for something that I love. I have granted you my intercession. And I have also granted you whatever you did not ask me for."</p>
-<p>These narrations may or may not be authentic, but they show the high esteem and value that some Muslims have attached to this Durood over time.</p>
-<h3>The benefits and virtues of reciting Durood E Muqaddas</h3>
-<p>The benefits and virtues of reciting Durood E Muqaddas are numerous and manifold. Some of them are as follows:</p>
-<ul>
-<li>It fulfills one's obligation of sending salutations upon the Prophet Muhammad (peace be upon him), which is a commandment from Allah in Surah Al-Ahzab verse 56: <code>Indeed, Allah confers blessing upon the Prophet, and His angels [ask Him to do so]. O you who have believed, ask [Allah to confer] blessing upon him and ask [Allah to grant him] peace.</code></li>
-<li>It increases one's love for the Prophet Muhammad (peace be upon him), which is a sign of faith and a condition for salvation. As narrated by Anas bin Malik (may Allah be pleased with him), the Prophet (peace be upon him) said: <code>None of you will have faith till he loves me more than his father, his children and all mankind.</code> (Bukhari)</li>
-<li>It draws one closer to Allah by invoking His name and seeking His mercy. As narrated by Abu Huraira (may Allah be pleased with him), the Prophet (peace be upon him) said: <code>Whoever sends blessings upon me once will receive ten blessings from Allah.</code> (Muslim)</li>
-(may Allah be pleased with him), the Prophet (peace be upon him) said: <code>Whoever recites Durood upon me 100 times on Friday, his 100 needs will be met, 70 of them in the hereafter and 30 of them in this world.</code> (Abu Dawud)</li>
-<li>It protects one from calamities and misfortunes in this world and the hereafter. As narrated by Abu Talha (may Allah be pleased with him), the Prophet (peace be upon him) said: <code>Whoever recites Durood upon me 10 times in the morning and 10 times in the evening, he will be under my protection on the Day of Judgment.</code> (Tabarani)</li>
-<li>It purifies one's heart and soul from sins and evils. As narrated by Abdullah bin Abbas (may Allah be pleased with him), the Prophet (peace be upon him) said: <code>Whoever recites Durood upon me abundantly, his sins will be forgiven even if they are like the foam of the sea.</code> (Tirmidhi)</li>
-<li>It elevates one's rank and status in this world and the hereafter. As narrated by Abu Huraira (may Allah be pleased with him), the Prophet (peace be upon him) said: <code>Whoever recites Durood upon me once, Allah will raise him ten degrees and write for him ten good deeds and erase for him ten bad deeds.</code> (Muslim)</li>
-<li>It brings one closer to the Prophet Muhammad (peace be upon him) and his intercession on the Day of Judgment. As narrated by Abdullah bin Mas'ud (may Allah be pleased with him), the Prophet (peace be upon him) said: <code>The closest people to me on the Day of Judgment are those who send the most blessings upon me.</code> (Tirmidhi)</li>
-</ul>
-<h2>How to recite Durood E Muqaddas?</h2>
-<p>Durood E Muqaddas is a very easy and simple way of sending salutations upon the Prophet Muhammad (peace be upon him). However, there are some prerequisites and etiquette that one should observe when reciting it.</p>
-<h3>The prerequisites and etiquette of reciting Durood E Muqaddas</h3>
-<p>The prerequisites and etiquette of reciting Durood E Muqaddas are as follows:</p>
-<ul>
-<li>One should have faith in Allah and His Messenger (peace be upon him) and recite Durood E Muqaddas with sincerity and devotion.</li>
-<li>One should be in a state of purity and cleanliness, both physically and spiritually. One should perform ablution before reciting Durood E Muqaddas and avoid any impurities or distractions.</li>
-<li>One should face the direction of Qibla, which is towards Makkah, when reciting Durood E Muqaddas. This is a sign of respect and reverence for the Prophet Muhammad (peace be upon him).</li>
-<li>One should recite Durood E Muqaddas with a clear and audible voice, but not too loud or too low. One should also pronounce each word correctly and carefully.</li>
-<li>One should recite Durood E Muqaddas with love and admiration for the Prophet Muhammad (peace be upon him) and send it as a gift to his noble soul.</li>
-</ul>
-<h3>The best times and occasions to recite Durood E Muqaddas</h3>
-<p>Durood E Muqaddas can be recited at any time and on any occasion, as it is a form of worship and remembrance of Allah and His Messenger (peace be upon him). However, there are some times and occasions that are more virtuous and rewarding than others. Some of them are as follows:</p>
-<ul>
-<li>The best time to recite Durood E Muqaddas is on Friday, which is the most blessed day of the week. It is also recommended to recite it after Jumu'ah prayer, which is a congregational prayer that Muslims perform on Friday noon.</li>
-<li>The best occasion to recite Durood E Muqaddas is during Ramadan, which is the most sacred month of the year. It is also recommended to recite it during Laylatul Qadr, which is the night of power that occurs in Ramadan.</li>
-<li>The best place to recite Durood E Muqaddas is in Makkah or Madinah, which are the holy cities where the Prophet Muhammad (peace be upon him) was born and lived. It is also recommended to recite it in Masjid Al-Haram or Masjid Al-Nabawi, which are the sacred mosques where he prayed and taught.</li>
-</ul>
-<h3>The method and frequency of reciting Durood E Muqaddas</h3>
-<p>The method and frequency of reciting Durood E Muqaddas are as follows:</p>
-<ul>
-<li>The method of reciting Durood E Muqaddas is to start with Bismillah-hirRa'hmaan nirRa'heem, which means "In the name of Allah, the Most Gracious, the Most Merciful". Then, one should recite the Arabic text of Durood E Muqaddas as given above. Then, one should end with SallALLAHU Alaihi Wasallam, which means "May Allah send blessings and peace upon him".</li>
-<li>The frequency of reciting Durood E Muqaddas depends on one's intention and ability. However, it is advisable to recite it as much as possible, especially on Fridays. Some scholars have suggested that one should recite it at least 11 times before sleeping or after waking up. Some have also suggested that one should recite it 1000 times on Friday or during Ramadan.</li>
-</ul>
-<h2>How to download Durood E Muqaddas PDF?</h2>
-<p>Durood E Muqaddas PDF is a digital file that contains the Arabic text, transliteration, translation, benefits, and method of Durood E Muqaddas. It can be downloaded from various online sources and stored on one's computer or mobile device. It can also be printed out or shared with others.</p>
-<h3>The advantages and disadvantages of downloading Durood E Muqaddas PDF</h3>
-<p>The advantages and disadvantages of downloading Durood E Muqaddas PDF are as follows:</p>
-<ul>
-<li>The advantages of downloading Durood E Muqaddas PDF are that it can help one to learn, memorize, understand, practice, and teach Durood E Muqaddas more easily and conveniently. It can also help one to access Durood E Muqaddas anytime and anywhere without needing a physical book or paper.</li>
-<li>The disadvantages of downloading Durood E Muqaddas PDF are that it can expose one to potential viruses or malware that may harm one's device or data. It can also cause one to lose respect or reverence for Durood E Muqaddas if one does not handle it properly or carelessly deletes it.</li>
-</ul>
-<h3>The sources and links to download Durood E Muqaddas PDF</h3>
-<p>The sources and links to download Durood E Muqaddas PDF are as follows:</p>
-<ul>
-<li>One source to download Durood E Muqaddas PDF is https://www.yaallah.in/durood-e-muqaddas-in-arabic/, which is a website that provides various Islamic resources and information. It has a link to download Durood E Muqaddas PDF in full.</li>
-<li>Another source to download Durood E Muqaddas PDF is https://archive.org/details/DaroodEMuqaddas/, which is a website that offers free access to millions of books, videos, audio files, software, etc. It has a link to download Durood E Muqaddas PDF by Muhammad Tariq Lahori.</li>
-and guidance. It has a link to download Durood E Muqaddas PDF in English.</li>
-</ul>
-<h3>The tips and precautions to download Durood E Muqaddas PDF</h3>
-<p>The tips and precautions to download Durood E Muqaddas PDF are as follows:</p>
-<ul>
-<li>One should verify the authenticity and reliability of the source and link before downloading Durood E Muqaddas PDF. One should avoid any suspicious or unknown websites that may contain harmful or inappropriate content.</li>
-<li>One should scan the downloaded file for any viruses or malware that may damage one's device or data. One should use a trusted antivirus software and update it regularly.</li>
-<li>One should respect and honor Durood E Muqaddas PDF as a sacred and valuable document. One should not delete it without a valid reason or share it with anyone who may misuse it or disrespect it.</li>
-<li>One should recite Durood E Muqaddas PDF with the intention of pleasing Allah and His Messenger (peace be upon him) and not for any worldly gain or fame. One should also act upon the teachings and guidance of Durood E Muqaddas PDF and not just read it for information or entertainment.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Durood E Muqaddas is a wonderful and powerful way of sending salutations upon the Prophet Muhammad (peace be upon him). It has many benefits and virtues for the reciter and the receiver in this world and the hereafter. It is also easy and simple to recite and download in PDF format.</p>
-<h3>A summary of the main points of the article</h3>
-<p>In this article, we have discussed:</p>
-<ul>
-<li>What is Durood E Muqaddas and what does it mean and signify?</li>
-<li>What is the origin and history of Durood E Muqaddas and who revealed it?</li>
-<li>What are the benefits and virtues of reciting Durood E Muqaddas and what are some of the narrations that prove them?</li>
-<li>How to recite Durood E Muqaddas and what are the prerequisites and etiquette that one should observe?</li>
-<li>How to download Durood E Muqaddas PDF and what are the advantages and disadvantages of doing so?</li>
-<li>What are the sources and links to download Durood E Muqaddas PDF and how to verify them?</li>
-<li>What are the tips and precautions to download Durood E Muqaddas PDF and how to respect and honor it?</li>
-</ul>
-<h3>A call to action for the readers to recite Durood E Muqaddas</h3>
-<p>We hope that this article has inspired you to recite Durood E Muqaddas more often and more sincerely. We urge you to make it a habit of reciting it daily, especially on Fridays, during Ramadan, and on other auspicious occasions. We also encourage you to download Durood E Muqaddas PDF from a reliable source and keep it handy on your device or print it out for your convenience. We also request you to share this article with your family and friends who may benefit from it.</p>
-<h3>A list of 5 FAQs about Durood E Muqaddas</h3>
-<p>Here are some frequently asked questions about Durood E Muqaddas:</p>
-<ol>
-<li>Q: What is the difference between Durood E Muqaddas and other types of Durood?</li>
-<li>A: All types of Durood are good and praiseworthy, but some have more merits and rewards than others. Durood E Muqaddas is one of the most sacred and comprehensive types of Durood that covers all aspects of the Prophet Muhammad's (peace be upon him) life, personality, qualities, companions, etc.</li>
-<li>Q: How many times should one recite Durood E Muqaddas?</li>
-<li>A: There is no fixed number or limit for reciting Durood E Muqaddas, but one should recite it as much as possible, especially on Fridays, during Ramadan, and on other auspicious occasions. Some scholars have suggested that one should recite it at least 11 times before sleeping or after waking up. Some have also suggested that one should recite it 1000 times on Friday or during Ramadan.</li>
-<li>Q: What are the effects of reciting Durood E Muqaddas on one's life?</li>
-and His Messenger (peace be upon him), fulfilling one's needs and desires in this world and the hereafter, protecting one from calamities and misfortunes, purifying one's heart and soul from sins and evils, elevating one's rank and status in this world and the hereafter, and bringing one closer to the Prophet Muhammad (peace be upon him) and his intercession on the Day of Judgment.</li>
-<li>Q: How can one memorize Durood E Muqaddas easily?</li>
-<li>A: One can memorize Durood E Muqaddas easily by following some tips, such as reading it with understanding and concentration, repeating it with a clear and audible voice, writing it down or typing it on a device, listening to it from a reciter or an audio file, reciting it along with a reciter or an audio file, reviewing it regularly and frequently, and asking Allah for help and guidance.</li>
-<li>Q: Can one recite Durood E Muqaddas in any language other than Arabic?</li>
-<li>A: It is preferable and recommended to recite Durood E Muqaddas in Arabic, as it is the original language of revelation and the language of the Prophet Muhammad (peace be upon him). However, if one does not know Arabic or has difficulty in pronouncing it, one can recite it in any other language that one understands and speaks. The main thing is to recite it with sincerity and devotion.</li>
-</ol>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fix Download Nokia X71 Stock Wallpapers 11 Wallpapers (2160 X 2310).md
DELETED
@@ -1,23 +0,0 @@
-<br />
-<h1>Download Nokia X71 Stock Wallpapers: 11 Stunning Images for Your Phone</h1>
-<p>If you are looking for some fresh and beautiful wallpapers for your phone, you might want to check out the Nokia X71 stock wallpapers. The Nokia X71 is a mid-range smartphone that was launched in April 2019. It features a 6.39-inch display with a resolution of 2,160 x 2,310 pixels and a punch-hole camera. The device also comes with a triple rear camera setup, a Snapdragon 660 processor, 6 GB of RAM, and 128 GB of internal storage.</p>
-<h2>Download Nokia X71 stock wallpapers: 11 wallpapers (2,160 x 2,310)</h2><br /><p><b><b>Download File</b> ✏ <a href="https://byltly.com/2uKwCL">https://byltly.com/2uKwCL</a></b></p><br /><br />
-<p>But what makes the Nokia X71 stand out from the crowd is its gorgeous wallpapers. The device comes with 11 stock wallpapers that are colorful, vibrant, and abstract. These wallpapers are perfect for adding some flair and personality to your phone's home screen or lock screen. They also look great on any device with a high-resolution display.</p>
-<p>If you want to download the Nokia X71 stock wallpapers, you can find them in the link below. The wallpapers are in ZIP format and have a resolution of 2,160 x 2,310 pixels. You can extract them using any file manager app and then apply them as your wallpaper. Alternatively, you can also preview the wallpapers in the gallery below and download them individually.</p>
-<p>Download Nokia X71 Stock Wallpapers: <a href="https://www.droidviews.com/download-nokia-x71-stock-wallpapers/">Click here</a></p>
-<p>Preview Nokia X71 Stock Wallpapers:</p>
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-1.jpg" alt="Nokia X71 Stock Wallpaper 1">
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-2.jpg" alt="Nokia X71 Stock Wallpaper 2">
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-3.jpg" alt="Nokia X71 Stock Wallpaper 3">
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-4.jpg" alt="Nokia X71 Stock Wallpaper 4">
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-5.jpg" alt="Nokia X71 Stock Wallpaper 5">
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-6.jpg" alt="Nokia X71 Stock Wallpaper 6">
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-7.jpg" alt="Nokia X71 Stock Wallpaper 7">
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-8.jpg" alt="Nokia X71 Stock Wallpaper 8">
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-9.jpg" alt="Nokia X71 Stock Wallpaper 9">
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-10.jpg" alt="Nokia X71 Stock Wallpaper 10">
-<img src="https://www.droidviews.com/wp-content/uploads/2019/04/Nokia-X71-Stock-Wallpapers-11.jpg" alt="Nokia X71 Stock Wallpaper 11">
-
-<p>I hope you like the Nokia X71 stock wallpapers. If you do, please share this article with your friends and let them know about these amazing images. You can also check out our other articles on stock wallpapers from various devices. Thanks for reading!</p> cec2833e83<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ghost] KKD Windows 7 V.3 2012 32Bit How to Install and Enjoy this Amazing OS.md
DELETED
@@ -1,92 +0,0 @@
-<br />
-<h1>What is Ghost KKD Windows 7 V.3 2012 32Bit?</h1>
-<p>If you are looking for a fast, stable and easy-to-use version of Windows 7, you may want to check out <strong>Ghost KKD Windows 7 V.3</strong>. This is a modified version of Windows 7 that has been optimized for performance and convenience by a Thai developer named KKD.</p>
-<p>Ghost KKD Windows 7 V.3 is a <strong>ghost file</strong>, which means that it is an image of an operating system that can be copied to another computer without installation. This saves you time and hassle when setting up your new system.</p>
-<h2>Ghost] KKD Windows 7 V.3 2012 32Bit</h2><br /><p><b><b>DOWNLOAD</b> ———>>> <a href="https://byltly.com/2uKzAK">https://byltly.com/2uKzAK</a></b></p><br /><br />
-<p>Ghost KKD Windows 7 V.3 comes with many features that make it stand out from other versions of Windows 7, such as:</p>
-<ul>
-<li>It is based on Windows 7 Ultimate SP1 x86 (32-bit), which is compatible with most hardware and software.</li>
-<li>It has been updated with the latest security patches and drivers as of November 2012.</li>
-<li>It has been tweaked for speed and stability, removing unnecessary components and services.</li>
-<li>It has been pre-installed with many useful programs and games, such as Microsoft Office, Adobe Photoshop, WinRAR, Google Chrome, etc.</li>
-<li>It has been customized with a beautiful theme and icons, giving it a fresh and modern look.</li>
-</ul>
-<p>In this article, we will show you how to download, install and use Ghost KKD Windows 7 V.3 on your computer.</p>
-<p>Ghost KKD Windows 7 Ultimate v.3 x86bit download<br />
-How to install Ghost KKD Windows 7 V.3 Rev1 32Bit<br />
-Ghost KKD Windows 7 V.3 2012 32Bit free download<br />
-Ghost KKD Windows 7 V.3 Rev1 All Driver All Program<br />
-Ghost KKD Windows 7 V.3 32Bit password clubzeed.net<br />
-Ghost KKD Windows 7 V.3 2012 32Bit ISO file<br />
-Ghost KKD Windows 7 V.3 Rev1 32Bit Google Drive<br />
-Ghost KKD Windows 7 V.3 2012 32Bit review<br />
-Ghost KKD Windows 7 V.3 Rev1 All Driver All Program blogspot<br />
-Ghost KKD Windows 7 V.3 2012 32Bit system requirements<br />
-Ghost KKD Windows 7 V.3 Rev1 32Bit storage.3bb.co.th<br />
-Ghost KKD Windows 7 V.3 2012 32Bit activation key<br />
-Ghost KKD Windows 7 V.3 Rev1 All Driver All Program dfcolo<br />
-Ghost KKD Windows 7 V.3 2012 32Bit serial number<br />
-Ghost KKD Windows 7 V.3 Rev1 32Bit crack<br />
-Ghost KKD Windows 7 V.3 2012 32Bit full version<br />
-Ghost KKD Windows 7 V.3 Rev1 All Driver All Program oswin4you.com<br />
-Ghost KKD Windows 7 V.3 2012 32Bit product key<br />
-Ghost KKD Windows 7 V.3 Rev1 All Driver All Program vdocuments.mx<br />
-Ghost KKD Windows 7 V.3 2012 32Bit patch<br />
-Ghost KKD Windows 7 V.3 Rev1 All Driver All Program banung<br />
-Ghost KKD Windows 7 V.3 2012 32Bit license key<br />
-Ghost KKD Windows 7 V.3 Rev1 All Driver All Program supersphit@jokergameth.com<br />
-Ghost KKD Windows 7 V.3 2012 32Bit keygen<br />
-Ghost KKD Windows 7 V.3 Rev1 All Driver All Program admin adminoffline<br />
-Ghost KKD Windows 7 V.3 Rev1 All Driver All Program kerdoos-academie.com<br />
-How to ghost windows with ghost kkd windows v.3 rev1 all driver all program?<br />
-What are the advantages of ghost kkd windows v.3 rev1 all driver all program?<br />
-How to update ghost kkd windows v.3 rev1 all driver all program?<br />
-How to uninstall ghost kkd windows v.3 rev1 all driver all program?<br />
-How to backup ghost kkd windows v.3 rev1 all driver all program?<br />
-How to restore ghost kkd windows v.3 rev1 all driver all program?<br />
-How to fix ghost kkd windows v.3 rev1 all driver all program errors?<br />
-How to optimize ghost kkd windows v.3 rev1 all driver all program performance?<br />
-How to customize ghost kkd windows v.3 rev1 all driver all program settings?<br />
-How to troubleshoot ghost kkd windows v.3 rev1 all driver all program issues?<br />
-How to secure ghost kkd windows v.3 rev1 all driver all program data?<br />
-How to recover ghost kkd windows v.3 rev1 all driver all program files?<br />
-How to migrate ghost kkd windows v.3 rev1 all driver all program to another PC?<br />
-How to clone ghost kkd windows v.3 rev1 all driver all program disk?<br />
-How to create a bootable USB with ghost kkd windows v.3 rev1 all driver all program?<br />
-How to burn ghost kkd windows v.3 rev1 all driver all program to a DVD?<br />
-How to use ghost kkd windows v.3 rev1 all driver all program with virtual machines?<br />
-How to dual boot ghost kkd windows v.3 rev1 all driver all program with another OS?<br />
-How to upgrade ghost kkd windows v.3 rev1 all driver all program to a newer version?<br />
-How to downgrade ghost kkd windows v.3 rev1 all driver all program to an older version?<br />
-How to install additional software on ghost kkd windows v.3 rev1 all driver all program?<br />
-How to remove unwanted software from ghost kkd windows v.3 rev1 all driver all program?<br />
-How to change the language of ghost kkd windows v.3 rev1 all driver all program?<br />
-How to activate the hidden features of ghost kkd windows v.3 rev1 all driver all program?</p>
-<h2>Why choose Ghost KKD Windows 7 V.3?</h2>
-<p>You may be wondering why you should choose Ghost KKD Windows 7 V.3 over other versions of Windows 7 or other operating systems. Here are some reasons why:</p>
-<ul>
-<li><strong>It is fast</strong>: Ghost KKD Windows 7 V.3 has been optimized for speed by removing unnecessary components and services that slow down your system. It also has been tweaked for better performance and responsiveness.</li>
-<li><strong>It is stable</strong>: Ghost KKD Windows 7 V.3 has been updated with the latest security patches and drivers that fix bugs and vulnerabilities in the original version of Windows 7. It also has been tested for compatibility and reliability with various hardware and software.</li>
-<li><strong>It is easy-to-use</strong>: Ghost KKD Windows 7 V.3 has been pre-installed with many useful programs and games that you can use right away without installation or activation. It also has been customized with a user-friendly interface that makes it easy to navigate and operate.</li>
-<li><strong>It is free</strong>: Ghost KKD Windows 7 V.3 is available for free download from the official website or other sources on the internet. You do not need to pay anything to use this product.</li>
-</ul>
-<p>Of course, Ghost KKD Windows 7 V.3 is not perfect and it may have some drawbacks as well. We will discuss them later in this article.</p>
-<h2>How to download and install Ghost KKD Windows 7 V.3?</h2>
-<p>To use Ghost KKD Windows 7 V.3 on your computer, you need to download the ghost file from the internet and then copy it to your hard drive using a bootable USB or DVD.</p>
-<h3>How to download Ghost KKD Windows 7 V.3?</h3>
-<p>The ghost file of Ghost KKD Windows <p>If you have any questions or feedback about Ghost KKD Windows 7 V.3 or this article, please feel free to leave a comment below. We would love to hear from you.</p>
-<h1>FAQs</h1>
-<p>Here are some frequently asked questions and answers about Ghost KKD Windows 7 V.3:</p>
-<h4>Q: Where can I download Ghost KKD Windows 7 V.3?</h4>
-<p>A: You can download Ghost KKD Windows 7 V.3 from the official website or other sources on the internet, such as forums or blogs. However, you should always be careful and scan the file with an antivirus program before using it.</p>
-<h4>Q: What is the difference between Ghost KKD Windows 7 V.3 and other versions of Windows 7?</h4>
-<p>A: Ghost KKD Windows 7 V.3 is a modified version of Windows 7 that has been optimized for speed, stability and convenience by a Thai developer named KKD. It is a ghost file that can be copied to another computer without installation using a bootable USB or DVD. It has been updated with the latest security patches and drivers as of November 2012. It has been pre-installed with many useful programs and games. It has been customized with a beautiful theme and icons.</p>
-<h4>Q: Is Ghost KKD Windows 7 V.3 legal?</h4>
-<p>A: No, Ghost KKD Windows 7 V.3 is not legal. It is a pirated version of Windows 7 that may violate some copyright or licensing terms of Microsoft or other software vendors. You may be liable for legal consequences if you use this product without a valid license key.</p>
-<h4>Q: Is Ghost KKD Windows 7 V.3 safe?</h4>
-<p>A: No, Ghost KKD Windows 7 V.3 is not safe. It may contain some malware or spyware that can harm your computer or compromise your privacy. You should always scan the file with an antivirus program before using it.</p>
-<h4>Q: Is Ghost KKD Windows 7 V.3 compatible?</h4>
-<p>A: No, Ghost KKD Windows 7 V.3 may not be compatible with some newer hardware or software that require newer versions of Windows or drivers. You may encounter some errors or glitches when using this product.</p>
-<h4></h4></p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Adobe Illustrator CS4 (Multilingual - Windows) - Crack LINK Only [RH] Full Version.md
DELETED
@@ -1,37 +0,0 @@
-
-<h1>How to Download and Install Adobe Illustrator CS4 for Free</h1>
-<p>Adobe Illustrator CS4 is a powerful vector graphics software that allows you to create stunning illustrations, logos, and graphics. It has many features and tools that can help you unleash your creativity and enhance your productivity. However, Adobe Illustrator CS4 is not available for purchase anymore, as it has been replaced by newer versions. So, how can you get Adobe Illustrator CS4 for free?</p>
-<p>One way is to download a crack version of the software from the internet. A crack version is a modified version that bypasses the activation process and lets you use the software without paying for it. However, this method is illegal and risky, as it may expose your computer to viruses, malware, or other threats. Moreover, you may not get the full functionality and performance of the original software, as it may have bugs, errors, or missing features.</p>
-<h2>Adobe Illustrator CS4 (Multilingual - Windows) - Crack only [RH] full version</h2><br /><p><b><b>Download</b> ✒ <a href="https://imgfil.com/2uy0Rh">https://imgfil.com/2uy0Rh</a></b></p><br /><br />
-<p>A better way is to download a free trial version of Adobe Illustrator CS4 from the official website of Adobe. A free trial version is a legitimate version that lets you use the software for a limited time (usually 30 days) without paying for it. This way, you can test the software and see if it meets your needs and expectations. Moreover, you can enjoy the full functionality and performance of the original software, as it is not modified or tampered with.</p>
-<p>To download a free trial version of Adobe Illustrator CS4, follow these steps:</p>
-<ol>
-<li>Go to <a href="https://www.adobe.com/products/illustrator.html">https://www.adobe.com/products/illustrator.html</a> and click on "Free Trial".</li>
-<li>Sign in with your Adobe ID or create one if you don't have one.</li>
-<li>Select your platform (Windows or Mac OS) and language.</li>
-<li>Click on "Download Now" and wait for the file to download.</li>
-<li>Locate the downloaded file and double-click on it to start the installation process.</li>
-<li>Follow the on-screen instructions to complete the installation.</li>
-<li>Launch Adobe Illustrator CS4 and enjoy your free trial.</li>
-</ol>
-<p>Note that you will need an internet connection to activate your free trial. Also, you will need to uninstall any previous versions of Adobe Illustrator before installing Adobe Illustrator CS4.</p>
-<p>If you like Adobe Illustrator CS4 and want to continue using it after your free trial expires, you will need to buy a license from Adobe or find an authorized reseller. You can also upgrade to a newer version of Adobe Illustrator if you want to access more features and tools.</p>
-<p>Adobe Illustrator CS4 is a great software for vector graphics design that can help you create amazing artworks. However, downloading a crack version of it is not recommended, as it may harm your computer and violate the law. Instead, you should download a free trial version from the official website of Adobe and enjoy it legally and safely.</p><p>Adobe Illustrator CS4 can help you create a variety of illustrations, from simple icons and logos to complex scenes and characters. You can use the tools and features of Adobe Illustrator CS4 to draw, color, shape, and transform your vector graphics. You can also apply effects, filters, gradients, patterns, and textures to your illustrations to make them more realistic and appealing.</p>
-<p>If you want to learn how to use Adobe Illustrator CS4 to create different types of illustrations, you can check out some of the tutorials available online. For example, you can visit <a href="https://design.tutsplus.com/tutorials/101-adobe-illustrator-tutorials--cms-29782">https://design.tutsplus.com/tutorials/101-adobe-illustrator-tutorials--cms-29782</a> and find 101 awesome Adobe Illustrator tutorials that cover various topics and techniques[^1^]. You can learn how to create illustrations such as:</p>
-<p></p>
-<ul>
-<li>A cute cartoon character</li>
-<li>A realistic portrait</li>
-<li>A vintage poster</li>
-<li>A geometric pattern</li>
-<li>A 3D logo</li>
-<li>A vector map</li>
-<li>A retro badge</li>
-<li>A realistic flower</li>
-<li>A stylized animal</li>
-<li>A seamless wallpaper</li>
-</ul>
-<p>These tutorials are suitable for beginners and intermediate users of Adobe Illustrator CS4. They will guide you step by step through the process of creating your own illustrations. You will also learn some tips and tricks that can help you improve your skills and workflow.</p>
-<p>Adobe Illustrator CS4 is a versatile and powerful software that can help you express your creativity and vision through vector graphics. You can create illustrations for various purposes and platforms, such as web design, print design, animation, games, and more. By following some of the tutorials available online, you can learn how to use Adobe Illustrator CS4 effectively and efficiently.</p> d5da3c52bf<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/EBP.Devis.et.Facturation.2010 Avc VERIFIED Crack.md
DELETED
@@ -1,6 +0,0 @@
-<h2>EBP.Devis.et.Facturation.2010 avc crack</h2><br /><p><b><b>Download File</b> ☑ <a href="https://imgfil.com/2uxZsx">https://imgfil.com/2uxZsx</a></b></p><br /><br />
-<br />
-Ebp gestion commerciale pro crack - Google Документи. Les articles seront ... des charges. Télécharger EBP Devis Facturation Pratic avec activation ... Télécharger EBP Gamme Classic 2009 et Pack de gestion 2010 Pro V14 – FR. Cette offre ... 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/EmailHackerv346activationcod.md
DELETED
@@ -1,12 +0,0 @@
-
-<p> chesstin f4bc01c98b gorhil says: at 2:14 am. at 10:00 am Reply. sandei cceab18d79 Results 1 - 36 of 55. 26/google-maps-con-relieve/ . Download EmailHackerv346activationcod 6 hack the moon[/caption] 1. Uncategorized. </p>
-<h2>EmailHackerv346activationcod</h2><br /><p><b><b>DOWNLOAD</b> ✅ <a href="https://imgfil.com/2uy0P8">https://imgfil.com/2uy0P8</a></b></p><br /><br />
-<p> jannflaw. at 8:52 pm. jannflaw 6f5222a214 chesstin f4bc01c98b gorhil says: at 2:14 am. at 10:00 am Reply. sandei cceab18d79 Results 1 - 36 of 55. 26/google-maps-con-relieve/ . Download EmailHackerv346activationcod 6 hack the moon[/caption] 1. Uncategorized. </p>
-<p>https://www.siriusarchitects.com/advert/EmailHackerv346activationcod-best/ https://www.siriusarchitects.com/advert/EmailHackerv346activationcod-best/. EmailHackerv346activationcod jannflaw. at 6:56 am. jannflaw 6f5222a214 https://cdn.thingiverse.com/assets/45/37/31/ec/f1/free-t-splines-protractor-train-r-7450.html </p>
-<p>https://businessbooster.page/ https://drezau.com/htpc/images_blank</>_attachments</>_attachments/2022/3/19/33/4/4..html emailhackerv346activationcod 1.1. emailhackerv346activationcod 2.0. emailhackerv346activationcod 2.1. </p>
-<p>https://trello.com/c/MA9cR4gG/ EmailHackerv346activationcod in Gamegate, vous avez 5 niveaux! (https://trello.com/c/kjQ2xCnG/ OnlineGoldBytes, (https://trello.com/c/VtTWxz9Q/06-online-game-of-dice-game-download-v0.8) </p>
-<p>https://trello.com/c/ejQrAHsM/59-emailhackerv346activationcod-fake-email-address-h-t-t- https://coub.com/stories/3375185-emailhackerv346activationcod https://trello.com/c/cvr5lVko/42-emailhackerv346activationcod </p>
-<p></p>
-<p>https://assets.paypos.com/emailhackerv346activationcod? EmailHackerv346activationcod https://www.herkules.co.uk/2018/04/27/5w3pfutgplq9dndqajjp25ytj1t3lqwh/a_tj.pdf EmailHackerv346activationcod. 123sf at 12:27 pm. </p> 899543212b<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Become a Famous Parkour Streamer with Rysen Dawn APK from Uptodown.md
DELETED
@@ -1,156 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Rysen Dawn: A Spectacular Parkour Game for Android</h1>
|
3 |
-
<p>If you are a fan of parkour, action, and adventure games, you might want to check out Rysen Dawn, a 3D platform game where you play as a live streamer who does parkour. In this game, you have to show your best parkour moves to your in-game followers, while climbing buildings, jumping over obstacles, sliding under pipes, and more. You can also customize your character's appearance, listen to your favorite music, use photo mode, and earn money from sponsors. In this article, we will tell you everything you need to know about Rysen Dawn, including its features, gameplay, graphics, sound, download, install, and more.</p>
|
4 |
-
<h2>What is Rysen Dawn?</h2>
|
5 |
-
<p>Rysen Dawn is a 3D action and platform game developed by R-USER Games and yPER Studios. It was released on October 1, 2022 for Android devices. The game belongs to the genre of parkour, which is a form of urban movement that involves running, jumping, climbing, rolling, and other acrobatic skills. The game is inspired by real-life parkour athletes and streamers, such as David Belle, Ryan Doyle, Jesse La Flair, Storror, etc.</p>
|
6 |
-
<h2>rysen dawn apk uptodown</h2><br /><p><b><b>Download File</b> ->->->-> <a href="https://urlin.us/2uT194">https://urlin.us/2uT194</a></b></p><br /><br />
|
7 |
-
<h3>Features and Highlights</h3>
|
8 |
-
<p>Rysen Dawn has many features and highlights that make it stand out from other parkour games. Some of them are:</p>
|
9 |
-
<ul>
|
10 |
-
<li><strong>Next-Gen Mobile graphics</strong>: The game has stunning graphics that create realistic and immersive environments. You can see the details of the buildings, objects, shadows, reflections, textures, etc. The game also has dynamic weather effects, such as rain, fog, snow, etc.</li>
|
11 |
-
<li><strong>Comfortable on-screen controllers</strong>: The game has intuitive and easy-to-use controls that are perfect for touch screens. You can move your character with your left thumb, control the camera with your right thumb, and perform actions with buttons on the right side of the screen. You can also adjust the sensitivity and size of the controls in the settings.</li>
|
12 |
-
<li><strong>NPC react to your emotes</strong>: The game has non-player characters (NPCs) that populate the scenarios. They react to your emotes, such as waving, dancing, laughing, etc. You can interact with them and make them follow you or join your live stream.</li>
|
13 |
-
<li><strong>Listen to your favourite music using in-game emote system</strong>: The game has an in-game emote system that allows you to listen to your favourite music while playing. You can load your own music files (mp3 format) in a specific folder on your device or use the default music provided by the game. You can also use headphones to enhance the sound quality.</li>
|
14 |
-
<li><strong>In-game emote system</strong>: The game has an in-game emote system that lets you dance and perform simple parkour tricks using buttons on the right side of the screen. You can choose from different dance styles and parkour moves, such as breakdance, hip hop, backflips, and frontflips.</li>
<li><strong>Photo mode</strong>: The game has a photo mode that allows you to take screenshots of your character and the scenery. You can access the photo mode by tapping the camera icon on the top left corner of the screen, adjust the angle, zoom, filter, frame, and more, and share your photos with your friends or on social media.</li>
<li><strong>Live stream system</strong>: The game has a live stream system that simulates a real-life streaming platform. You can see the number of followers, likes, and comments that you get from your in-game audience, and earn money from sponsors and donations. You can use the money to buy new accessories, headphones, clothes, and other items for your character.</li>
</ul>
<h2>How to Play Rysen Dawn?</h2>
<p>Rysen Dawn is a game that requires skill, timing, and creativity. You control Rysen, a young parkour enthusiast who wants to become a famous live streamer. You perform parkour moves in different scenarios, such as rooftops, streets, and parks, while avoiding falls and collisions with obstacles such as walls, fences, and cars. Your goal is to impress your followers and sponsors with your style and tricks.</p>
<h3>Controls and Moves</h3>
<p>The game has simple and intuitive controls that are suitable for touch screens. You move Rysen with your left thumb using a virtual joystick and control the camera with your right thumb by swiping on the screen. You perform actions with buttons on the right side of the screen. The buttons are:</p>
<ul>
<li><strong>Jump</strong>: This button allows you to jump over gaps or obstacles. You can also use it to climb walls or ledges by holding it near them.</li>
<li><strong>Slide</strong>: This button allows you to slide under pipes or low barriers. You can also use it to roll on the ground after landing from a high jump.</li>
<li><strong>Grab</strong>: This button allows you to grab poles or rails and swing on them. You can also use it to hang from ledges or edges.</li>
<li><strong>Emote</strong>: This button opens the emote menu, where you can choose from different emotes, such as dance, music, and tricks.</li>
</ul>
<p>You can combine these buttons with different directions to perform different parkour moves, such as wall runs, vaults, spins, and flips. You can also use the environment to your advantage by bouncing on trampolines, sliding on cables, or jumping on cars.</p>
<h3>Modes and Levels</h3>
<p>The game has different modes and levels that offer different challenges and rewards. The modes are:</p>
<ul>
<li><strong>Career mode</strong>: This is the main mode of the game, where you follow Rysen's story and progress through different levels. Each level has a different location, objective, and difficulty. You have to complete the objective, such as reaching a destination, collecting items, or performing tricks, within a time limit and without losing all your health. You earn money and followers by completing levels.</li>
<li><strong>Free mode</strong>: This is a mode where you can explore the scenarios freely and practice your parkour skills. There is no time limit or objective in this mode, and you can also use the photo mode and the emote system.</li>
<li><strong>Challenge mode</strong>: This is a mode where you can compete with other players online or offline in different challenges, such as racing, tricking, or escaping. You earn money and followers by winning challenges.</li>
</ul>
<h2>How Does Rysen Dawn Look and Sound?</h2>
<p>Rysen Dawn has impressive graphics and sound that create a realistic and immersive experience. The game has high-quality 3D models and textures that show the details of the characters and the environments, and dynamic lighting and shadows that change with the time of day and weather conditions. Realistic physics and animations make the parkour moves look smooth and natural.</p>
<h3>Customization and Music</h3>
<p>The game allows you to customize Rysen's appearance and music according to your preferences. You can change Rysen's hair style, clothes, shoes, accessories, headphones, and more using the money that you earn from the live stream system. You can also choose from different colors and styles for each item, and see how Rysen looks in the customization menu or in the photo mode.</p>
<p>The game also allows you to listen to your favorite music while playing. You can load your own music files (MP3 format) into a specific folder on your device or use the default music provided by the game. You can also use headphones to enhance the sound quality. You can access the music menu through the emote system by selecting the music icon; the name and artist of the current song are shown on the top right corner of the screen.</p>
<h3>Photo Mode and Sharing</h3>
<p>The photo mode lets you take screenshots of your character and the scenery. You can access it by tapping the camera icon on the top left corner of the screen, adjust the angle, zoom, filter, frame, and more, and share your photos with your friends or on social media.</p>
<p>The game also has a sharing feature that allows you to share your live stream videos or your achievements with your friends or on social media. You can access it by tapping the share icon on the top right corner of the screen and choose from different options, such as Facebook, Twitter, Instagram, and YouTube.</p>
<h2>How to Download and Install Rysen Dawn?</h2>
<p>Rysen Dawn is available for Android devices. You can download and install it using different methods, such as the Uptodown app store or an XAPK file. Here are the steps for each method:</p>
<h3>Uptodown App Store</h3>
<p>Uptodown is an app store that offers a variety of apps and games for Android devices. You can download and install Rysen Dawn using the Uptodown app store by following these steps:</p>
<ol>
<li>Download and install the Uptodown app store on your device from <a href="https://uptodown-android.en.uptodown.com/android">https://uptodown-android.en.uptodown.com/android</a>.</li>
<li>Open the Uptodown app store and search for Rysen Dawn in the search bar.</li>
<li>Select Rysen Dawn from the search results and tap on Download.</li>
<li>Wait for the download to finish and tap on Install.</li>
<li>Allow the Uptodown app store to install unknown apps if prompted.</li>
<li>Wait for the installation to finish and tap on Open.</li>
<li>Enjoy playing Rysen Dawn on your device.</li>
</ol>
<h3>XAPK File</h3>
<p>XAPK is a file format that bundles both the APK (application package) and the OBB (data file) of an Android app or game. You can download and install Rysen Dawn using an XAPK file by following these steps:</p>
<ol>
<li>Download the Rysen Dawn XAPK file from <a href="https://apkpure.com/rysen-dawn/com.RUSERGames.RysenDawn">https://apkpure.com/rysen-dawn/com.RUSERGames.RysenDawn</a>.</li>
<li>Download and install the APKPure app on your device from <a href="https://apkpure.com/apkpure-app.html">https://apkpure.com/apkpure-app.html</a>.</li>
<li>Open the APKPure app and tap on Install XAPK.</li>
<li>Select the Rysen Dawn XAPK file from your device storage and tap on Install.</li>
<li>Wait for the installation to finish and tap on Open.</li>
<li>Enjoy playing Rysen Dawn on your device.</li>
</ol>
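<p>If you prefer to sideload the two pieces of an XAPK by hand from a computer, the same result can be reached over USB debugging. The sketch below assumes you have adb installed, have already unzipped the XAPK, and that the package name is com.RUSERGames.RysenDawn (taken from the APKPure URL above); the file names are placeholders for whatever your copy contains.</p>
<pre><code>import subprocess
from pathlib import Path

PACKAGE = "com.RUSERGames.RysenDawn"   # package name from the APKPure URL; verify on your copy
xapk_dir = Path("rysen_dawn_xapk")     # hypothetical folder holding the unzipped XAPK contents

# 1. Install the base APK over USB debugging.
subprocess.run(["adb", "install", str(xapk_dir / "base.apk")], check=True)

# 2. Push the OBB data file(s) to the folder Android expects for this package.
for obb in xapk_dir.glob("*.obb"):
    subprocess.run(
        ["adb", "push", str(obb), f"/sdcard/Android/obb/{PACKAGE}/{obb.name}"],
        check=True,
    )
</code></pre>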
<h3>Requirements and Permissions</h3>
<p>Rysen Dawn requires some minimum specifications and permissions to run smoothly on your device. The requirements are:</p>
<ul>
<li><strong>Android version</strong>: 5.0 or higher</li>
<li><strong>RAM</strong>: 2 GB or higher</li>
<li><strong>Storage space</strong>: 500 MB or higher</li>
</ul>
<p>The permissions are:</p>
<ul>
<li><strong>Photos/Media/Files</strong>: Required to access your music files and save your screenshots.</li>
<li><strong>Wi-Fi connection information</strong>: Required to connect to online servers and play online challenges.</li>
<li><strong>View network connections</strong>: Required to check your network status and optimize your gameplay.</li>
<li><strong>Full network access</strong>: Required for online features, such as the live stream system and the sharing feature.</li>
<li><strong>Prevent device from sleeping</strong>: Required to keep the game running smoothly and prevent interruptions.</li>
</ul>
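<p>If you want to verify the Android version and RAM from a computer instead of digging through the device settings, one convenient way is to query the device over adb. This is purely a convenience sketch, assuming adb is installed and USB debugging is enabled; the game itself does not need any of this.</p>
<pre><code>import subprocess

def adb_shell(cmd):
    """Run a shell command on the connected device and return its output."""
    result = subprocess.run(["adb", "shell", cmd], capture_output=True, text=True)
    return result.stdout.strip()

android_version = adb_shell("getprop ro.build.version.release")
mem_total_kb = int(adb_shell("grep MemTotal /proc/meminfo").split()[1])

print(f"Android version: {android_version}")        # needs 5.0 or higher
print(f"RAM: {mem_total_kb / 1024 / 1024:.1f} GB")  # needs 2 GB or higher
</code></pre>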
<h2>Conclusion</h2>
<p>Rysen Dawn offers a spectacular parkour experience for Android devices, with impressive graphics, sound, gameplay, and features. You play as Rysen, a live streamer who does parkour in different scenarios. You can customize your character, listen to your own music, use the photo mode, and earn money and followers. You can download and install the game easily using the Uptodown app store or an XAPK file, provided your device meets the minimum requirements and permissions described above.</p>
<p>In conclusion, Rysen Dawn is a game that you should definitely try if you love parkour, action, and adventure games. It is fun, challenging, and immersive; you can enjoy it solo or with other players online, and share your videos and photos with your friends or on social media. The game is rated 4.5 out of 5 stars on the Uptodown app store and has over 10 million downloads. You can download it for free from <a href="https://rysen-dawn.en.uptodown.com/android">https://rysen-dawn.en.uptodown.com/android</a> or <a href="https://apkpure.com/rysen-dawn/com.RUSERGames.RysenDawn">https://apkpure.com/rysen-dawn/com.RUSERGames.RysenDawn</a>.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about Rysen Dawn:</p>
<ol>
<li><strong>What is the difference between Rysen Dawn and other parkour games?</strong></li>
<p>Rysen Dawn differs from other parkour games in several ways:</p>
<ul>
<li>It has a live stream system that simulates a real-life streaming platform: you can see the number of followers, likes, and comments from your in-game audience, and earn money from sponsors and donations.</li>
<li>It has an in-game emote system that lets you listen to your favorite music, dance, and do simple parkour tricks using buttons on the right side of the screen.</li>
<li>It has a photo mode for taking screenshots of your character and the scenery, which you can share with your friends or on social media.</li>
<li>It has realistic physics and animations that make the parkour moves look smooth and natural.</li>
<li>It has dynamic weather effects, such as rain, fog, and snow.</li>
</ul>
<li><strong>How to unlock new accessories and headphones in the game?</strong></li>
<p>You can unlock new accessories and headphones using the money that you earn from the live stream system. Open the customization menu by tapping the hanger icon on the top left corner of the screen; there you can see the price and style of each item and preview how it looks on your character before buying it.</p>
<li><strong>How to load your own music in the game?</strong></li>
<p>You can load your own music through the in-game emote system. Place your music files (MP3 format) in a folder named "RysenDawnMusic" on your device storage; you can create this folder manually or with a file manager app. Once your files are in place, open the emote system, select the music icon, and you will see the list of available songs.</p>
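<p>If you set the folder up from a computer or from a terminal app on the device, the answer above boils down to creating the folder and copying MP3s into it. A minimal sketch follows; the folder name "RysenDawnMusic" comes from the answer above, while the source path is a placeholder you should adapt (or use adb push if working over USB).</p>
<pre><code>import shutil
from pathlib import Path

music_dir = Path("/sdcard") / "RysenDawnMusic"  # folder name the game looks for, per the FAQ
my_songs = Path.home() / "Music"                # placeholder: wherever your MP3s live

music_dir.mkdir(parents=True, exist_ok=True)
for song in my_songs.glob("*.mp3"):             # the game expects MP3 format
    shutil.copy(song, music_dir / song.name)
</code></pre>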
<li><strong>How to fix black screen or other issues in the game?</strong></li>
<p>If you encounter issues such as a black screen, lagging, or crashing, you can try these solutions:</p>
<ul>
<li>Make sure that your device meets the minimum requirements and permissions for the game.</li>
<li>Make sure that you have enough storage space on your device.</li>
<li>Make sure that you have a stable internet connection for online features.</li>
<li>Clear the cache and data of the game from your device settings.</li>
<li>Reinstall the game from the Uptodown app store or the XAPK file.</li>
<li>Contact the developers at <a href="mailto:rusergames@gmail.com">rusergames@gmail.com</a> or <a href="mailto:yperstudios@gmail.com">yperstudios@gmail.com</a> for further assistance.</li>
</ul>
<li><strong>Is Rysen Dawn available for PC or other platforms?</strong></li>
<p>Rysen Dawn is currently only available for Android devices. The developers have stated that they are working on PC and other platforms, so follow their official social media accounts for updates and news. You can also play the game on a PC using an emulator, but this is not recommended by the developers.</p>
</ol>
<p>I hope you enjoyed reading this article and learned something new about Rysen Dawn. If you have any questions or comments, feel free to leave them below. Thank you for your time and attention.</p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cmo usar SnapTube para Android Descarga vdeos de YouTube Instagram y ms con un solo clic.md
DELETED
@@ -1,112 +0,0 @@
<h1>Snaptube Download 2021 APK: How to Download Videos and Music from Any Platform</h1>
<p>Do you want to download videos and music from your favorite platforms and apps like YouTube, Instagram, WhatsApp, and TikTok? Do you want to convert them into MP3 format and listen to them offline? Do you want to access unlimited content without restrictions or ads? If you answered yes to any of these questions, then you need Snaptube.</p>
<p><b>Download Zip</b>: <a href="https://urlin.us/2uSRNb">https://urlin.us/2uSRNb</a></p>
<h2>What is Snaptube?</h2>
<p>Snaptube is a free app that lets you download videos and music from many platforms and apps on your Android device. You can choose from over 50 supported sites, including popular ones like YouTube, Instagram, WhatsApp, TikTok, Facebook, and Twitter, and you can download music in MP3 format from any video source. You can manage your downloads and playlists easily with Snaptube's user-friendly interface, and enjoy high-quality downloads with fast speed and low data consumption.</p>
<h3>Features of Snaptube</h3>
<p>Snaptube has many features that make it the best app for downloading videos and music. Here are some of them:</p>
<ul>
<li>You can download videos in different resolutions, from 144p to 4K.</li>
<li>You can download music in MP3 format with high quality.</li>
<li>You can download multiple files at once with the batch download feature.</li>
<li>You can pause and resume your downloads at any time.</li>
<li>You can search for videos and music by keywords or categories.</li>
<li>You can explore trending and recommended videos and music on the home page.</li>
<li>You can customize the app settings according to your preferences.</li>
</ul>
<h3>How to download Snaptube APK</h3>
<p>Snaptube is not available on the Google Play Store due to its downloading functionality. However, you can easily download it from its official website or other trusted sources. Here are the steps to download the Snaptube APK:</p>
<ol>
<li>Go to <a href="">Snaptube's official website</a> or any other reliable source that offers the latest version of the Snaptube APK.</li>
<li>Tap on the download button and wait for the file to be downloaded on your device.</li>
<li>Go to your device settings and enable the installation of apps from unknown sources.</li>
<li>Locate the downloaded file in your file manager and tap on it to install it.</li>
<li>Follow the instructions on the screen and grant the necessary permissions to Snaptube.</li>
<li>Launch Snaptube and start downloading videos and music from any platform.</li>
</ol>
<h2>How to use Snaptube to download videos and music</h2>
<p>Using Snaptube is easy and intuitive: you can download videos and music from any platform in a few simple steps. Here are some examples:</p>
<h3>Download videos from YouTube, Instagram, WhatsApp, TikTok, and more</h3>
<p>To download videos from any platform, you have two options:</p>
<ul>
<li>You can use Snaptube's built-in browser. For example, to download videos from YouTube, tap the YouTube icon on Snaptube's home page, browse to the video you want, and tap the download button. You can choose the resolution and format you prefer and start the download.</li>
<li>You can use any other browser or app. For example, to download a video from Instagram, open the Instagram app, find the video, copy its link, and paste it into Snaptube's search bar. Snaptube automatically detects the video and shows the download options; choose the resolution and format you prefer and start the download.</li>
</ul>
<h3>Download music in MP3 format</h3>
<p>To download music in MP3 format, follow the same steps as for downloading videos, but choose the MP3 option instead of a video resolution and format. Snaptube converts the video into MP3 and downloads it to your device. You can also adjust the bitrate and quality of the MP3 file according to your needs.</p>
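<p>Snaptube's internals are not public, but the conversion it performs is a standard transcoding job, and you can reproduce the same video-to-MP3 step yourself to see what the bitrate choice means in practice. A rough sketch, assuming ffmpeg is installed and using placeholder file names:</p>
<pre><code>import subprocess

# Extract the audio track from a downloaded video and encode it as MP3.
# -b:a sets the audio bitrate: higher values (e.g. 320k) give better
# quality but larger files; 128k is a common compromise.
subprocess.run([
    "ffmpeg", "-i", "downloaded_video.mp4",
    "-vn",            # drop the video stream
    "-b:a", "192k",   # target audio bitrate
    "audio.mp3",
], check=True)
</code></pre>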
<h3>Manage your downloads and playlists</h3>
<p>To manage your downloads and playlists, go to the My Files section of Snaptube. There you can see all your downloaded videos and music as well as your playlists; you can play, delete, share, or rename your files, create, edit, or delete playlists, and sort files by name, date, size, or duration.</p>
<h2>Benefits of using Snaptube</h2>
<p>Using Snaptube has many benefits that make it the best app for downloading videos and music. Here are some of them:</p>
<h3>Save time and data</h3>
<p>With Snaptube, you can save time and data by downloading videos and music in one app. You don't need to switch between different apps or browsers to access different platforms, or rely on online converters and downloaders that may be slow, unreliable, or unsafe. Snaptube offers fast and secure downloads with low data consumption.</p>
<h3>Enjoy offline entertainment</h3>
<p>With Snaptube, you can enjoy offline entertainment by keeping videos and music on your device. You don't need to worry about internet connection, buffering, or ads: you can watch or listen to your favorite content anytime and anywhere, and share your downloads with friends via Bluetooth, Wi-Fi Direct, or social media.</p>
<h3>Access unlimited content</h3>
<p>With Snaptube, you can download videos and music from any supported platform and app without the restrictions or limitations some of them impose. You can download movies, TV shows, music videos, songs, podcasts, live streams, stories, reels, shorts, and more.</p>
<h2>FAQs about Snaptube</h2>
<p>Here are some frequently asked questions about Snaptube:</p>
<ol>
<li><b>Is Snaptube safe to use?</b><br>
Yes. Snaptube is verified by CM Security, McAfee, and Lookout Security; it does not contain viruses, malware, or spyware, and it does not collect personal information from its users.</li>
<li><b>Is Snaptube free to use?</b><br>
Yes. Snaptube does not charge any fees or subscriptions for its services, and it does not show annoying ads or pop-ups.</li>
<li><b>Is Snaptube legal to use?</b><br>
Yes, as long as you use it for personal and non-commercial purposes. You should respect the intellectual property rights of content owners, follow their terms of use, and avoid downloading copyrighted or illegal content.</li>
<li><b>What are the minimum requirements for using Snaptube?</b><br>
Android 4.1 or higher and 100 MB of free storage space.</li>
<li><b>How can I contact Snaptube for feedback or support?</b><br>
Email them at <a href="mailto:snaptubefeedback@gmail.com">snaptubefeedback@gmail.com</a>, or follow them on Facebook, Twitter, Instagram, YouTube, or Telegram for updates and news.</li>
</ol>
<h2>Conclusion</h2>
<p>Snaptube is the best app for downloading videos and music from any platform and app on your Android device. It is free, safe, fast, easy, and versatile: it lets you save time and data, enjoy offline entertainment, and access unlimited content, and it has many features that make it user-friendly and customizable. If you want to download videos and music from any platform with one app, download Snaptube today!</p>
spaces/1phancelerku/anime-remove-background/DirectX 12 for Windows 7 64 bit The Ultimate Gaming Experience.md
DELETED
@@ -1,135 +0,0 @@
<h1>DirectX 12 Download Windows 7 64 Bit: A Complete Guide</h1>
<p>If you are a PC gamer, you have probably heard of DirectX, a suite of technologies that enables multimedia applications, especially games, to work with your video and audio hardware. DirectX is developed by Microsoft and is an essential component of the Windows operating system. But what is DirectX 12 and why should you care about it? In this article, we will explain what DirectX 12 is, how it differs from previous versions of DirectX, how to install it on Windows 7 64 bit, how to troubleshoot common issues, and how it compares with other graphics APIs. By the end of this article, you will have a better understanding of DirectX 12 and how it can improve your gaming experience.</p>
<h2>What Is DirectX and Why It Is Important for PC Gaming</h2>
<p>DirectX is a collection of application programming interfaces (APIs) that allows software, primarily games, to communicate with your video and audio hardware. Games that use DirectX can take advantage of the multimedia accelerator features built into your hardware, which improves your overall multimedia experience. For example, DirectX can enable faster frame rates, higher resolutions, more realistic lighting and shadows, and better sound quality.</p>
<p><b>Download Zip</b>: <a href="https://jinyurl.com/2uNSfu">https://jinyurl.com/2uNSfu</a></p>
<p>DirectX was first introduced in Windows 95 as a way to provide direct access to hardware components for games that previously ran on DOS. Since then, Microsoft has released several versions of DirectX, each adding new features and capabilities. The latest version is DirectX 12, released in 2015 along with Windows 10. However, DirectX 12 is not exclusive to Windows 10; it can also be installed on Windows 7 64 bit, with some limitations.</p>
<h2>What Are the Main Features and Benefits of DirectX 12</h2>
<p>DirectX 12 is the most advanced version of DirectX so far, offering several features and benefits that are not available in previous versions:</p>
<ul>
<li><strong>Low-level API:</strong> Unlike DirectX 11, which is a high-level API that abstracts away many details of the hardware, DirectX 12 is a low-level API that gives developers more control over hardware resources. This reduces the overhead incurred by the API and the graphics driver, allowing for more efficient use of the CPU and GPU.</li>
<li><strong>Parallel compute:</strong> Another difference between DirectX 11 and DirectX 12 is that DirectX 11 handles operations serially, with a single queue of commands that execute in order. Parallel compute allows developers to make multiple calls at the same time, which improves the performance and scalability of applications.</li>
<li><strong>Ray tracing:</strong> Ray tracing is a technique that simulates the behavior of light rays in a realistic way, creating stunning visual effects such as reflections, refractions, shadows, ambient occlusion, and global illumination. DirectX 12 supports ray tracing through an extension called DirectX Raytracing (DXR), which allows developers to integrate ray tracing into their games.</li>
<li><strong>Variable rate shading:</strong> Variable rate shading (VRS) is a feature that allows developers to adjust the shading rate of different regions of the screen based on their importance or complexity. For example, regions that are in focus or have high detail can have a higher shading rate, while less important regions can be shaded at a lower rate to save performance.</li>
</ul>
<h2>How to Install DirectX 12 on Windows 7 64 Bit</h2>
<h3>The Official Way from Microsoft</h3>
<p>To install DirectX 12 on Windows 7 64 bit the official way, you need to meet a few requirements. First, you need a valid license of Windows 7 Service Pack 1 (SP1) or higher. Second, you need a graphics card that supports DirectX 12. Third, you need a game that supports DirectX 12 on Windows 7 64 bit; currently, only a few games do, such as World of Warcraft, Gears 5, and Halo: The Master Chief Collection. Fourth, you need enough disk space and memory to install the updates. If you meet these requirements, follow these steps:</p>
<ol>
<li>Go to the Microsoft Update Catalog website and search for "DirectX 12".</li>
<li>Find the update that matches your system architecture (x64 for 64 bit) and language.</li>
<li>Click on the Download button and save the file to your computer.</li>
<li>Run the file and follow the instructions to install the update.</li>
<li>Restart your computer and launch the game that supports DirectX 12 on Windows 7 64 bit.</li>
</ol>
<p>You should now be able to enjoy the features and benefits of DirectX 12 on Windows 7 64 bit. However, if you encounter any issues or errors, you may need to troubleshoot them using the methods described in the next section.</p>
<h3>The Unofficial Way from Third-Party Sources</h3>
<p>The unofficial way to install DirectX 12 on Windows 7 64 bit is to use a third-party tool or patch that claims to enable DirectX 12 on Windows 7 64 bit. However, this method is not recommended for several reasons. First, it is not supported by Microsoft and may violate their terms of service. Second, it may cause compatibility or security issues with your system or games. Third, it may not work as expected, or at all. Fourth, it may be difficult to uninstall or revert back to the original state.</p>
<p>Therefore, we advise you to avoid any third-party tools or patches that claim to install DirectX 12 on Windows 7 64 bit. Instead, use the official way from Microsoft or upgrade your system to Windows 10, which supports DirectX 12 natively.</p>
<h2>How to Troubleshoot DirectX 12 Issues on Windows 7 64 Bit</h2>
<p>If you have successfully installed DirectX 12 on Windows 7 64 bit using the official way from Microsoft, but you still encounter issues or errors when playing games that support it, you may need to troubleshoot them. Here are some common methods that can help you fix DirectX 12 issues on Windows 7 64 bit:</p>
<h3>How to Use DirectX Diagnostic Tool (DxDiag)</h3>
<p>DirectX Diagnostic Tool (DxDiag) is a built-in tool that can help you diagnose and test your DirectX installation and hardware. You can use DxDiag to check your system information, display settings, sound settings, input devices, and more. You can also use DxDiag to save a report of your system configuration and send it to technical support if needed.</p>
<p>To use DxDiag, follow these steps:</p>
<ol>
<li>Press the Windows key + R to open the Run dialog box.</li>
<li>Type dxdiag and click OK.</li>
<li>Wait for DxDiag to collect information about your system.</li>
<li>Click on the tabs to view different aspects of your system.</li>
<li>Click on the Save All Information button to save a report of your system configuration.</li>
<li>Click on the Exit button to close DxDiag.</li>
</ol>
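<p>DxDiag can also run non-interactively: its documented /t switch writes the same report straight to a text file, which is handy if you want to read the DirectX version from a script. A small sketch (the output location is arbitrary):</p>
<pre><code>import os
import subprocess
import time

report = os.path.join(os.environ["TEMP"], "dxdiag.txt")

# /t tells DxDiag to write its report to a file instead of opening the UI.
subprocess.run(["dxdiag", "/t", report], check=True)

# DxDiag may still be gathering data when the call returns, so wait for the file.
while not os.path.exists(report):
    time.sleep(1)

with open(report, encoding="utf-8", errors="ignore") as f:
    for line in f:
        if "DirectX Version" in line:
            print(line.strip())  # e.g. "DirectX Version: DirectX 12"
            break
</code></pre>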
<p>You can use DxDiag to check whether your system meets the requirements for DirectX 12 on Windows 7 64 bit, such as the operating system version, the graphics card model, and the DirectX version. You can also use it to identify problems with your DirectX installation or hardware, such as missing or corrupted files, outdated or incompatible drivers, or insufficient memory.</p>
<h3>How to Update or Rollback Display Drivers</h3>
<p>Display drivers are software components that allow your graphics card to communicate with your operating system and applications. Having the latest compatible display drivers is crucial for the performance and quality of DirectX 12 games. However, a driver update can sometimes cause issues such as crashes, freezes, glitches, or reduced performance; in that case, you may need to roll back to a previous version that worked well.</p>
<p>To update or rollback your display drivers, follow these steps:</p>
<ol>
<li>Press the Windows key + X and select Device Manager.</li>
<li>Expand the Display adapters category and right-click on your graphics card.</li>
<li>Select Update driver to search for and install the latest driver for your graphics card.</li>
<li>If updating your driver causes any issues, repeat steps 2 and 3, but select Roll Back driver instead of Update driver to restore the previous driver.</li>
<li>Restart your computer and check if the issues are resolved.</li>
</ol>
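<p>Before and after a driver change, it helps to record exactly which driver version is installed. On Windows 7 you can query this with the built-in wmic tool, and calling it from a script makes the check repeatable. This is just a convenience sketch; the Device Manager steps above remain the normal route.</p>
<pre><code>import subprocess

# Ask WMI for the name and driver version of every display adapter.
output = subprocess.run(
    ["wmic", "path", "win32_VideoController", "get", "Name,DriverVersion"],
    capture_output=True, text=True,
).stdout

print(output)  # one row per GPU, with the driver version next to the adapter name
</code></pre>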
<p>You can also use the official website of your graphics card manufacturer, such as NVIDIA, AMD, or Intel, to download and install the latest or a previous driver. Make sure you download drivers that match your graphics card model and operating system version.</p>
<h3>How to Enable or Disable Hardware Acceleration</h3>
<p>Hardware acceleration is a feature that lets your graphics card perform some tasks faster than your CPU, such as rendering graphics, decoding video, or encrypting data. It can improve the performance and quality of DirectX 12 games by reducing the workload on your CPU and utilizing the full potential of your GPU. However, it can also cause issues such as crashes, freezes, glitches, or compatibility problems; in that case, you may need to disable it.</p>
<p>To enable or disable hardware acceleration, follow these steps:</p>
<ol>
<li>Press the Windows key + R to open the Run dialog box.</li>
<li>Type control panel and click OK.</li>
<li>Select Appearance and Personalization.</li>
<li>Select Display.</li>
<li>Select Change display settings.</li>
<li>Select Advanced settings.</li>
<li>Select the Troubleshoot tab.</li>
<li>Select Change settings.</li>
<li>Move the slider to adjust the hardware acceleration level, from Full (default) to None (disabled) or any intermediate level.</li>
<li>Click OK and apply the changes.</li>
<li>Restart your computer and check if the issues are resolved.</li>
</ol>
<p>Note that some graphics cards may not support changing the hardware acceleration level. In that case, you may need to enable or disable hardware acceleration for specific applications or games, for example through the settings menu of the game or application. You can also use the compatibility mode of Windows 7 to run older games or applications that may not work well with hardware acceleration.</p>
<h2>How to Compare DirectX 12 with Previous Versions of DirectX</h2>
<p>If you are curious about how DirectX 12 compares with previous versions such as DirectX 11, DirectX 10, or DirectX 9, here are the main points of comparison:</p>
<h3>The Differences Between High-Level and Low-Level APIs</h3>
<p>One of the main differences is that DirectX 12 is a low-level API, while previous versions of DirectX are high-level APIs. A high-level API abstracts away many details of the hardware and provides a simpler, easier way for developers to create applications. A low-level API gives developers more control over hardware resources and allows them to optimize their applications for specific hardware configurations.</p>
<p>A high-level API is more user-friendly and compatible with different hardware devices, but it has more overhead and less flexibility. A low-level API is more efficient and flexible, but also more complex and difficult to use.</p>
<p>Therefore, DirectX 12 offers more performance and scalability than previous versions of DirectX by allowing developers to access hardware resources directly and tune their applications for specific configurations. However, it also requires more skill and effort from developers to create applications that work well across different hardware devices.</p>
<h3>The Advantages of Parallel Compute and Ray Tracing</h3>
<p>Another difference is that DirectX 12 supports parallel compute and ray tracing, while previous versions do not. Parallel compute allows developers to make multiple calls to the GPU at the same time, instead of waiting for one call to finish before making another. Ray tracing simulates the behavior of light rays in a realistic way, creating effects such as reflections, refractions, shadows, ambient occlusion, and global illumination.</p>
<p>Parallel compute improves the performance and scalability of applications by reducing the CPU bottleneck and utilizing the full potential of the GPU. Ray tracing improves the quality and realism of the graphics with more natural and immersive lighting and shading. However, both features demand more computational power and memory, which may not be available on older or lower-end devices.</p>
<p>Therefore, DirectX 12 offers more features and capabilities than previous versions of DirectX, but it also requires more hardware resources and device support to run these features smoothly and effectively.</p>
<h3>The Performance and Compatibility of DirectX 12 Games</h3>
<p>One of the main reasons gamers want to install DirectX 12 on Windows 7 64 bit is to play games that support DirectX 12 and enjoy its features. However, not all games support DirectX 12, and not all games that do run better on it than on previous versions. The performance and compatibility of DirectX 12 games depend on several factors, such as the game engine, the graphics card, the driver, the operating system, and the settings.</p>
<p>Some games run faster or smoother on DirectX 12 than on previous versions, especially if they use parallel compute or ray tracing; examples include Forza Horizon 4, Shadow of the Tomb Raider, Metro Exodus, and Control. Other games run slower or worse on DirectX 12, especially if they are not optimized for it or have compatibility issues with certain hardware or software; examples include Deus Ex: Mankind Divided, Hitman, Rise of the Tomb Raider, and Battlefield V.</p>
<p>Therefore, DirectX 12 does not guarantee a better experience for every game that supports it. You may need to experiment with different settings to find the best configuration for your system, and check the official websites or forums of your game or graphics card manufacturer for updates or patches that improve performance or compatibility on DirectX 12.</p>
<h2>Conclusion</h2>
<p>In conclusion, DirectX 12 is a powerful graphics API that offers several features and benefits not available in previous versions of DirectX. However, installing it on Windows 7 64 bit is not as easy as on Windows 10, which comes with DirectX 12 pre-installed: you need to meet certain requirements and follow the official steps from Microsoft, avoid third-party tools or patches that claim to install DirectX 12 on Windows 7 64 bit, and be prepared to troubleshoot issues in games that support it. We hope this article has helped you learn more about DirectX 12 and how to install it on Windows 7 64 bit. If you have any questions or feedback, please feel free to leave a comment below. Happy gaming!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about DirectX 12 and Windows 7 64 bit:</p>
<h3>What are the system requirements for DirectX 12?</h3>
<p>The system requirements for DirectX 12 are:</p>
<ul>
<li>A valid license of Windows 7 Service Pack 1 (SP1) or higher, or Windows 10.</li>
<li>A graphics card that supports DirectX 12. You can check the list of DirectX 12 compatible graphics cards here.</li>
<li>A game that supports DirectX 12 on Windows 7 64 bit. You can check the list of DirectX 12 games here.</li>
<li>Enough disk space and memory to install the updates and run the games.</li>
</ul>
<h3>How can I check which version of DirectX I have installed?</h3>
<p>You can check which version of DirectX you have installed by using the DirectX Diagnostic Tool (DxDiag):</p>
<ol>
<li>Press the Windows key + R to open the Run dialog box.</li>
<li>Type dxdiag and click OK.</li>
<li>Wait for DxDiag to collect information about your system.</li>
<li>Click on the System tab and look for the DirectX Version field.</li>
<li>Click on the Exit button to close DxDiag.</li>
</ol>
<h3>Can I use DirectX 12 on Windows 10 or other operating systems?</h3>
<p>Yes, you can use DirectX 12 on Windows 10 or other operating systems that support it. However, some features may not be available on older or different systems; for example, DirectX Raytracing (DXR) is only available on Windows 10 version 1809 or higher, and some DirectX 12 games may not work well, or at all, on older systems. Therefore, it is recommended to use the latest version of Windows 10 to enjoy the full potential of DirectX 12.</p>
<h3>Which games support DirectX 12 and how can I enable it?</h3>
<p>There are many games that support DirectX 12, but not all of them support it on Windows 7 64 bit. You can check the list of DirectX 12 games here. To enable DirectX 12 in a game, follow the instructions from the developer or publisher. Usually the option is in the game's settings menu, under the graphics or display category, but some games require launching with a specific command line argument or editing a configuration file. The official websites or forums of your game may have more information.</p>
<h3>What are some alternatives to DirectX 12?</h3>
<p>If you are looking for alternatives to DirectX 12, consider these other graphics APIs available for PC gaming:</p>
<ul>
<li><strong>Vulkan:</strong> Vulkan is a low-level API similar to DirectX 12 in performance and flexibility. It is developed by the Khronos Group, an industry consortium that also develops OpenGL and OpenCL. Vulkan is cross-platform and supports Windows, Linux, Android, and other operating systems, and it supports ray tracing through the Vulkan Ray Tracing extensions. Examples of games that use Vulkan are Doom Eternal, Red Dead Redemption 2, and Wolfenstein: Youngblood.</li>
<li><strong>OpenGL:</strong> OpenGL is a high-level API widely used for 2D and 3D graphics applications. It is also developed by the Khronos Group and is cross-platform, supporting Windows, Linux, macOS, and other operating systems. OpenGL does not support ray tracing natively, but it can be implemented through extensions or libraries such as NVIDIA OptiX or Intel Embree. Examples of games that use OpenGL are Minecraft, Half-Life 2, and Counter-Strike: Global Offensive.</li>
<li><strong>Metal:</strong> Metal is a low-level API exclusive to Apple platforms such as macOS, iOS, iPadOS, and tvOS. It is developed by Apple and designed to optimize the performance and quality of graphics applications on Apple hardware, and it supports ray tracing through Metal Performance Shaders. Examples of games that use Metal are World of Warcraft, Fortnite, and Civilization VI.</li>
</ul>
<p>These are just some of the alternatives to DirectX 12. Keep in mind that each API has its own advantages and disadvantages, and not all games support all APIs, so you may need to do some research and testing before choosing the best API for your system and your game.</p>
<p>This is the end of the article. Thank you for reading!</p>
<p>Reference: <a href="https://www.pcgamer.com/directx-12-games-list/">https://www.pcgamer.com/directx-12-games-list/</a> (the list of DirectX 12 games cited above)</p>
spaces/1toTree/lora_test/ppdiffusers/schedulers/preconfig/__init__.py
DELETED
@@ -1,38 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa

from ...utils import (
    OptionalDependencyNotAvailable,
    is_paddle_available,
    is_scipy_available,
)

try:
    if not is_paddle_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_paddle_objects import *  # noqa F403
else:
    from .preconfig_scheduling_euler_ancestral_discrete import (
        PreconfigEulerAncestralDiscreteScheduler,
    )

try:
    if not (is_paddle_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_paddle_and_scipy_objects import *  # noqa F403
else:
    from .preconfig_scheduling_lms_discrete import PreconfigLMSDiscreteScheduler
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/vocoder/bigvgan/activations.py
DELETED
@@ -1,120 +0,0 @@
# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
# LICENSE is in incl_licenses directory.

import torch
from torch import nn, sin, pow
from torch.nn import Parameter


class Snake(nn.Module):
    '''
    Implementation of a sine-based periodic activation function
    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter
    References:
        - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
        https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = Snake(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        '''
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha: trainable parameter
            alpha is initialized to 1 by default, higher values = higher-frequency.
            alpha will be trained along with the rest of your model.
        '''
        super(Snake, self).__init__()
        self.in_features = in_features

        # initialize alpha
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:  # log scale alphas initialized to zeros
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
        else:  # linear scale alphas initialized to ones
            self.alpha = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable

        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        Snake := x + 1/a * sin^2 (xa)
        '''
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # line up with x to [B, C, T]
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
        x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)

        return x


class SnakeBeta(nn.Module):
    '''
    A modified Snake function which uses separate parameters for the magnitude of the periodic components
    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter that controls frequency
        - beta - trainable parameter that controls magnitude
    References:
        - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
        https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = SnakeBeta(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        '''
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha - trainable parameter that controls frequency
            - beta - trainable parameter that controls magnitude
            alpha is initialized to 1 by default, higher values = higher-frequency.
            beta is initialized to 1 by default, higher values = higher-magnitude.
            alpha will be trained along with the rest of your model.
        '''
        super(SnakeBeta, self).__init__()
        self.in_features = in_features

        # initialize alpha
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:  # log scale alphas initialized to zeros
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
            self.beta = Parameter(torch.zeros(in_features) * alpha)
        else:  # linear scale alphas initialized to ones
            self.alpha = Parameter(torch.ones(in_features) * alpha)
            self.beta = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable
        self.beta.requires_grad = alpha_trainable

        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        SnakeBeta := x + 1/b * sin^2 (xa)
        '''
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # line up with x to [B, C, T]
        beta = self.beta.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
            beta = torch.exp(beta)
        x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)

        return x
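
# --- Usage sketch (illustrative only, not part of the original file) ---
# A quick way to sanity-check the two activations above is to push a dummy
# (B, C, T) tensor through them and confirm the shape is preserved; this
# mirrors the docstring examples.
#
#   snake = Snake(in_features=256)
#   snake_beta = SnakeBeta(in_features=256, alpha_logscale=True)
#
#   x = torch.randn(4, 256, 1024)            # (batch, channels, time)
#   assert snake(x).shape == x.shape         # shape is preserved
#   assert snake_beta(x).shape == x.shape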
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/pretrained.py
DELETED
@@ -1,147 +0,0 @@
import hashlib
import os
import urllib
import warnings

from tqdm import tqdm

_RN50 = dict(
    openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
    yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt",
    cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"
)

_RN50_quickgelu = dict(
    openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
    yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt",
    cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"
)

_RN101 = dict(
    openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
    yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"
)

_RN101_quickgelu = dict(
    openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
    yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"
)

_RN50x4 = dict(
    openai="https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
)

_RN50x16 = dict(
    openai="https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
)

_RN50x64 = dict(
    openai="https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
)

_VITB32 = dict(
    openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
    laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt",
    laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt",
    laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt",
)

_VITB32_quickgelu = dict(
    openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
    laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt",
    laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt",
    laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt",
)

_VITB16 = dict(
    openai="https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
)

_VITL14 = dict(
    openai="https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
)

_PRETRAINED = {
    "RN50": _RN50,
    "RN50-quickgelu": _RN50_quickgelu,
    "RN101": _RN101,
    "RN101-quickgelu": _RN101_quickgelu,
    "RN50x4": _RN50x4,
    "RN50x16": _RN50x16,
    "ViT-B-32": _VITB32,
    "ViT-B-32-quickgelu": _VITB32_quickgelu,
    "ViT-B-16": _VITB16,
    "ViT-L-14": _VITL14,
}


def list_pretrained(as_str: bool = False):
    """ returns list of pretrained models
    Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
    """
    return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()]


def list_pretrained_tag_models(tag: str):
    """ return all models having the specified pretrain tag """
    models = []
    for k in _PRETRAINED.keys():
        if tag in _PRETRAINED[k]:
            models.append(k)
    return models


def list_pretrained_model_tags(model: str):
    """ return all pretrain tags for the specified model architecture """
    tags = []
    if model in _PRETRAINED:
        tags.extend(_PRETRAINED[model].keys())
    return tags


def get_pretrained_url(model: str, tag: str):
    if model not in _PRETRAINED:
        return ''
    model_pretrained = _PRETRAINED[model]
    if tag not in model_pretrained:
        return ''
    return model_pretrained[tag]


def download_pretrained(url: str, root: str = os.path.expanduser("~/.cache/clip")):
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    if 'openaipublic' in url:
        expected_sha256 = url.split("/")[-2]
    else:
        expected_sha256 = ''

    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        if expected_sha256:
            if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
                return download_target
            else:
                warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
        else:
            return download_target

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    if expected_sha256 and hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")

    return download_target
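These helpers form a small registry API: enumerate (model, tag) pairs, resolve a pair to a URL, then fetch with checksum verification. A short usage sketch, assuming the deleted module were still importable under its Space-local path:

from ldm.modules.encoders.open_clap.pretrained import (
    list_pretrained, get_pretrained_url, download_pretrained)

print(list_pretrained(as_str=True))   # e.g. ['RN50:openai', 'RN50:yfcc15m', ...]
url = get_pretrained_url("ViT-B-32", "openai")
if url:                               # '' signals an unknown model/tag pair
    path = download_pretrained(url)   # cached under ~/.cache/clip, SHA256-checked for openaipublic URLs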
spaces/AIGC-Audio/Make_An_Audio_inpaint/app.py
DELETED
@@ -1,170 +0,0 @@
import torch
import numpy as np
import gradio as gr
from PIL import Image
import matplotlib
from omegaconf import OmegaConf
from einops import repeat
import librosa
from ldm.models.diffusion.ddim import DDIMSampler
from vocoder.bigvgan.models import VocoderBigVGAN
from ldm.util import instantiate_from_config
from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000

SAMPLE_RATE = 16000
cmap_transform = matplotlib.cm.viridis
torch.set_grad_enabled(False)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

def initialize_model(config, ckpt):
    config = OmegaConf.load(config)
    model = instantiate_from_config(config.model)
    model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)

    model = model.to(device)
    print(model.device, device, model.cond_stage_model.device)
    sampler = DDIMSampler(model)
    return sampler


def make_batch_sd(
        mel,
        mask,
        device,
        num_samples=1):

    mel = torch.from_numpy(mel)[None, None, ...].to(dtype=torch.float32)
    mask = torch.from_numpy(mask)[None, None, ...].to(dtype=torch.float32)
    masked_mel = (1 - mask) * mel

    mel = mel * 2 - 1
    mask = mask * 2 - 1
    masked_mel = masked_mel * 2 - 1

    batch = {
        "mel": repeat(mel.to(device=device), "1 ... -> n ...", n=num_samples),
        "mask": repeat(mask.to(device=device), "1 ... -> n ...", n=num_samples),
        "masked_mel": repeat(masked_mel.to(device=device), "1 ... -> n ...", n=num_samples),
    }
    return batch

def gen_mel(input_audio):
    sr, ori_wav = input_audio
    print(sr, ori_wav.shape, ori_wav)

    ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0  # order='C' just requests C-style memory layout
    if len(ori_wav.shape) == 2:  # stereo
        ori_wav = librosa.to_mono(ori_wav.T)  # gradio load wav shape could be (wav_len, 2) but librosa expects (2, wav_len)
    print(sr, ori_wav.shape, ori_wav)
    ori_wav = librosa.resample(ori_wav, orig_sr=sr, target_sr=SAMPLE_RATE)

    mel_len, hop_size = 848, 256
    input_len = mel_len * hop_size
    if len(ori_wav) < input_len:
        input_wav = np.pad(ori_wav, (0, mel_len * hop_size), constant_values=0)
    else:
        input_wav = ori_wav[:input_len]

    mel = TRANSFORMS_16000(input_wav)
    return mel

def show_mel_fn(input_audio):
    crop_len = 500  # the full mel cannot be shown due to gradio's Image bug when using tool='sketch'
    crop_mel = gen_mel(input_audio)[:, :crop_len]
    color_mel = cmap_transform(crop_mel)
    return Image.fromarray((color_mel * 255).astype(np.uint8))


def inpaint(sampler, batch, seed, ddim_steps, num_samples=1, W=512, H=512):
    model = sampler.model

    prng = np.random.RandomState(seed)
    start_code = prng.randn(num_samples, model.first_stage_model.embed_dim, H // 8, W // 8)
    start_code = torch.from_numpy(start_code).to(device=device, dtype=torch.float32)

    c = model.get_first_stage_encoding(model.encode_first_stage(batch["masked_mel"]))
    cc = torch.nn.functional.interpolate(batch["mask"],
                                         size=c.shape[-2:])
    c = torch.cat((c, cc), dim=1)  # (b, c+1, h, w), the extra channel is the mask

    shape = (c.shape[1] - 1,) + c.shape[2:]
    samples_ddim, _ = sampler.sample(S=ddim_steps,
                                     conditioning=c,
                                     batch_size=c.shape[0],
                                     shape=shape,
                                     verbose=False)
    x_samples_ddim = model.decode_first_stage(samples_ddim)

    mask = batch["mask"]  # [-1, 1]
    mel = torch.clamp((batch["mel"] + 1.0) / 2.0, min=0.0, max=1.0)
    mask = torch.clamp((batch["mask"] + 1.0) / 2.0, min=0.0, max=1.0)
    predicted_mel = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
    inpainted = (1 - mask) * mel + mask * predicted_mel
    inpainted = inpainted.cpu().numpy().squeeze()
    inpaint_wav = vocoder.vocode(inpainted)

    return inpainted, inpaint_wav


def predict(input_audio, mel_and_mask, ddim_steps, seed):
    show_mel = np.array(mel_and_mask['image'].convert("L")) / 255  # only a crop of the mel is displayed, so the mel is regenerated from the audio
    mask = np.array(mel_and_mask["mask"].convert("L")) / 255

    mel_bins, mel_len = 80, 848

    input_mel = gen_mel(input_audio)[:, :mel_len]  # only a crop of the mel is displayed, so the mel is regenerated from the audio
    mask = np.pad(mask, ((0, 0), (0, mel_len - mask.shape[1])), mode='constant', constant_values=0)  # pad the mask back to the size of the original mel
    print(mask.shape, input_mel.shape)
    with torch.no_grad():
        batch = make_batch_sd(input_mel, mask, device, num_samples=1)
        inpainted, gen_wav = inpaint(
            sampler=sampler,
            batch=batch,
            seed=seed,
            ddim_steps=ddim_steps,
            num_samples=1,
            H=mel_bins, W=mel_len
        )
    inpainted = inpainted[:, :show_mel.shape[1]]
    color_mel = cmap_transform(inpainted)
    input_len = int(input_audio[1].shape[0] * SAMPLE_RATE / input_audio[0])
    gen_wav = (gen_wav * 32768).astype(np.int16)[:input_len]
    return Image.fromarray((color_mel * 255).astype(np.uint8)), (SAMPLE_RATE, gen_wav)


sampler = initialize_model('./configs/inpaint/txt2audio_args.yaml', './useful_ckpts/inpaint7_epoch00047.ckpt')
vocoder = VocoderBigVGAN('./vocoder/logs/bigv16k53w', device=device)

block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Make-An-Audio Inpainting")

    with gr.Row():
        with gr.Column():
            input_audio = gr.inputs.Audio()

            show_button = gr.Button("Show Mel")

            run_button = gr.Button("Predict Masked Place")
            with gr.Accordion("Advanced options", open=False):
                ddim_steps = gr.Slider(label="Steps", minimum=1,
                                       maximum=150, value=100, step=1)
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=2147483647,
                    step=1,
                    randomize=True,
                )
        with gr.Column():
            show_inpainted = gr.Image(type="pil").style(width=848, height=80)
            outaudio = gr.Audio()
            show_mel = gr.Image(type="pil", tool='sketch')  # .style(width=848, height=80) prevents the full image from being displayed
    show_button.click(fn=show_mel_fn, inputs=[input_audio], outputs=show_mel)

    run_button.click(fn=predict, inputs=[input_audio, show_mel, ddim_steps, seed], outputs=[show_inpainted, outaudio])


block.launch()
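The core of the app is the blend in inpaint(): mel values live in [0, 1], are rescaled to [-1, 1] for the diffusion model, and the user-drawn mask decides where the decoded sample replaces the original. A toy numpy-only sketch of that blending step, with stand-in arrays matching the app's 80 x 848 mel shape:

import numpy as np

mel = np.random.rand(80, 848)            # stand-in for the input mel, values in [0, 1]
predicted_mel = np.random.rand(80, 848)  # stand-in for the decoded diffusion sample
mask = np.zeros_like(mel)
mask[:, 300:400] = 1.0                   # region the user painted over

# keep the original outside the mask, take the model output inside it
inpainted = (1 - mask) * mel + mask * predicted_mel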
spaces/Ababababababbababa/SD-2.1-Img2Img/README.md
DELETED
@@ -1,14 +0,0 @@
---
title: SD 2.1 Img2Img
emoji: 👀
colorFrom: purple
colorTo: indigo
sdk: gradio
sdk_version: 3.11.0
app_file: app.py
pinned: false
license: mit
duplicated_from: trysem/SD-2.1-Img2Img
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/login/$types.d.ts
DELETED
@@ -1,28 +0,0 @@
import type * as Kit from '@sveltejs/kit';

type Expand<T> = T extends infer O ? { [K in keyof O]: O[K] } : never;
type RouteParams = {  }
type RouteId = '/login';
type MaybeWithVoid<T> = {} extends T ? T | void : T;
export type RequiredKeys<T> = { [K in keyof T]-?: {} extends { [P in K]: T[K] } ? never : K; }[keyof T];
type OutputDataShape<T> = MaybeWithVoid<Omit<App.PageData, RequiredKeys<T>> & Partial<Pick<App.PageData, keyof T & keyof App.PageData>> & Record<string, any>>
type EnsureDefined<T> = T extends null | undefined ? {} : T;
type OptionalUnion<U extends Record<string, any>, A extends keyof U = U extends U ? keyof U : never> = U extends unknown ? { [P in Exclude<A, keyof U>]?: never } & U : never;
export type Snapshot<T = any> = Kit.Snapshot<T>;
type PageServerParentData = EnsureDefined<import('../$types.js').LayoutServerData>;
type PageParentData = EnsureDefined<import('../$types.js').LayoutData>;

export type PageServerLoad<OutputData extends OutputDataShape<PageServerParentData> = OutputDataShape<PageServerParentData>> = Kit.ServerLoad<RouteParams, PageServerParentData, OutputData, RouteId>;
export type PageServerLoadEvent = Parameters<PageServerLoad>[0];
type ExcludeActionFailure<T> = T extends Kit.ActionFailure<any> ? never : T extends void ? never : T;
type ActionsSuccess<T extends Record<string, (...args: any) => any>> = { [Key in keyof T]: ExcludeActionFailure<Awaited<ReturnType<T[Key]>>>; }[keyof T];
type ExtractActionFailure<T> = T extends Kit.ActionFailure<infer X> ? X extends void ? never : X : never;
type ActionsFailure<T extends Record<string, (...args: any) => any>> = { [Key in keyof T]: Exclude<ExtractActionFailure<Awaited<ReturnType<T[Key]>>>, void>; }[keyof T];
type ActionsExport = typeof import('../../../../../src/routes/login/+page.server.js').actions
export type SubmitFunction = Kit.SubmitFunction<Expand<ActionsSuccess<ActionsExport>>, Expand<ActionsFailure<ActionsExport>>>
export type ActionData = Expand<Kit.AwaitedActions<ActionsExport>> | null;
export type PageServerData = null;
export type PageData = Expand<PageParentData>;
export type Action<OutputData extends Record<string, any> | void = Record<string, any> | void> = Kit.Action<RouteParams, OutputData, RouteId>
export type Actions<OutputData extends Record<string, any> | void = Record<string, any> | void> = Kit.Actions<RouteParams, OutputData, RouteId>
export type RequestEvent = Kit.RequestEvent<RouteParams, RouteId>;
spaces/AchyuthGamer/OpenGPT/g4f/Provider/GptGo.py
DELETED
@@ -1,79 +0,0 @@
from __future__ import annotations

from aiohttp import ClientSession
import json

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt


class GptGo(AsyncGeneratorProvider):
    url = "https://gptgo.ai"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        timeout: int = 30,
        **kwargs
    ) -> AsyncGenerator:
        headers = {
            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept" : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin" : cls.url,
            "Referer" : cls.url + "/",
            "Sec-Fetch-Dest" : "empty",
            "Sec-Fetch-Mode" : "cors",
            "Sec-Fetch-Site" : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            async with session.get(
                "https://gptgo.ai/action_get_token.php",
                params={
                    "q": format_prompt(messages),
                    "hlgpt": "default",
                    "hl": "en"
                },
                proxy=proxy
            ) as response:
                response.raise_for_status()
                token = (await response.json(content_type=None))["token"]

            async with session.get(
                "https://gptgo.ai/action_ai_gpt.php",
                params={
                    "token": token,
                },
                proxy=proxy
            ) as response:
                response.raise_for_status()
                start = "data: "
                async for line in response.content:
                    line = line.decode()
                    if line.startswith("data: "):
                        if line.startswith("data: [DONE]"):
                            break
                        line = json.loads(line[len(start):-1])
                        content = line["choices"][0]["delta"].get("content")
                        if content:
                            yield content


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
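The provider streams tokens by first fetching a session token, then reading a server-sent-events response line by line. A hypothetical driver for it, assuming the g4f package layout in the file path above is importable (the gptgo.ai endpoints themselves may no longer respond):

import asyncio
from g4f.Provider.GptGo import GptGo  # import path as in the deleted file

async def main():
    messages = [{"role": "user", "content": "Hello"}]
    # create_async_generator yields content deltas as they arrive
    async for chunk in GptGo.create_async_generator(model="gpt-3.5-turbo", messages=messages):
        print(chunk, end="", flush=True)

asyncio.run(main())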
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/bbcodetext/Factory.d.ts
DELETED
@@ -1,7 +0,0 @@
import BBCodeText from './BBCodeText';

export default function (
    x?: number, y?: number,
    content?: string,
    style?: BBCodeText.TextStyle
): BBCodeText;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/methods/listpanel/ToggleListPanel.js
DELETED
@@ -1,10 +0,0 @@
var ToggleListPanel = function () {
    if (!this.listPanel) {
        this.openListPanel();
    } else {
        this.closeListPanel();
    }
    return this;
}

export default ToggleListPanel;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/Factory.js
DELETED
@@ -1,13 +0,0 @@
import ScrollablePanel from './ScrollablePanel.js';
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('scrollablePanel', function (config) {
    var gameObject = new ScrollablePanel(this.scene, config);
    this.scene.add.existing(gameObject);
    return gameObject;
});

SetValue(window, 'RexPlugins.UI.ScrollablePanel', ScrollablePanel);

export default ScrollablePanel;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/OnTouchTrack.js
DELETED
@@ -1,33 +0,0 @@
import PositionToPercent from './PositionToPercent.js';

var OnTouchTrack = function (pointer, localX, localY) {
    if (!this.enable) {
        return;
    }
    if (!pointer.isDown) {
        return;
    }

    tmpPoint.x = pointer.worldX;
    tmpPoint.y = pointer.worldY;

    var startPoint, endPoint;
    if (!this.reverseAxis) {
        startPoint = this.getStartPoint();
        endPoint = this.getEndPoint();
    } else {
        startPoint = this.getEndPoint();
        endPoint = this.getStartPoint();
    }
    var value = PositionToPercent(startPoint, endPoint, tmpPoint);

    this.stopEaseValue();
    if ((this.easeValueDuration === 0) || (Math.abs(this.value - value) < 0.1)) {
        this.value = value;
    } else {
        this.easeValueTo(value);
    }
}
var tmpPoint = {};

export default OnTouchTrack;
spaces/AhmedSSoliman/MarianCG-CoNaLa/README.md
DELETED
@@ -1,37 +0,0 @@
---
title: MarianCG-CoNaLa
emoji: 🏢
colorFrom: blue
colorTo: green
sdk: gradio
app_file: app.py
pinned: false
---

# Configuration

`title`: _string_
Display title for the Space

`emoji`: _string_
Space emoji (emoji-only character allowed)

`colorFrom`: _string_
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)

`colorTo`: _string_
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)

`sdk`: _string_
Can be either `gradio` or `streamlit`

`sdk_version` : _string_
Only applicable for `streamlit` SDK.
See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.

`app_file`: _string_
Path to your main application file (which contains either `gradio` or `streamlit` Python code).
Path is relative to the root of the repository.

`pinned`: _boolean_
Whether the Space stays on top of your list.
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/__init__.py
DELETED
@@ -1,20 +0,0 @@
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

from . import autosummary
from . import network
from . import optimizer
from . import tfutil
from . import custom_ops

from .tfutil import *
from .network import Network

from .optimizer import Optimizer

from .custom_ops import get_plugin
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/bias_act.py
DELETED
@@ -1,226 +0,0 @@
# Copyright (c) SenseTime Research. All rights reserved.

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Custom PyTorch ops for efficient bias and activation."""

import os
import warnings
import numpy as np
import torch
import dnnlib
import traceback

from .. import custom_ops
from .. import misc

# ----------------------------------------------------------------------------

activation_funcs = {
    'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
    'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
    'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
    'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
    'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
    'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
    'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
    'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
    'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
}

# ----------------------------------------------------------------------------

_inited = False
_plugin = None
_null_tensor = torch.empty([0])


def _init():
    global _inited, _plugin
    if not _inited:
        _inited = True
        sources = ['bias_act.cpp', 'bias_act.cu']
        sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
        try:
            _plugin = custom_ops.get_plugin(
                'bias_act_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
        except:
            warnings.warn(
                'Failed to build CUDA kernels for bias_act. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
    return _plugin is not None

# ----------------------------------------------------------------------------


def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
    r"""Fused bias and activation function.

    Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
    and scales the result by `gain`. Each of the steps is optional. In most cases,
    the fused op is considerably more efficient than performing the same calculation
    using standard PyTorch ops. It supports first and second order gradients,
    but not third order gradients.

    Args:
        x:      Input activation tensor. Can be of any shape.
        b:      Bias vector, or `None` to disable. Must be a 1D tensor of the same type
                as `x`. The shape must be known, and it must match the dimension of `x`
                corresponding to `dim`.
        dim:    The dimension in `x` corresponding to the elements of `b`.
                The value of `dim` is ignored if `b` is not specified.
        act:    Name of the activation function to evaluate, or `"linear"` to disable.
                Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
                See `activation_funcs` for a full list. `None` is not allowed.
        alpha:  Shape parameter for the activation function, or `None` to use the default.
        gain:   Scaling factor for the output tensor, or `None` to use default.
                See `activation_funcs` for the default scaling of each activation function.
                If unsure, consider specifying 1.
        clamp:  Clamp the output values to `[-clamp, +clamp]`, or `None` to disable
                the clamping (default).
        impl:   Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).

    Returns:
        Tensor of the same shape and datatype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    if impl == 'cuda' and x.device.type == 'cuda' and _init():
        return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
    return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)

# ----------------------------------------------------------------------------


@misc.profiled_function
def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Slow reference implementation of `bias_act()` using standard PyTorch ops.
    """
    assert isinstance(x, torch.Tensor)
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)

    # Add bias.
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.ndim == 1
        assert 0 <= dim < x.ndim
        assert b.shape[0] == x.shape[dim]
        x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)])

    # Evaluate activation function.
    alpha = float(alpha)
    x = spec.func(x, alpha=alpha)

    # Scale by gain.
    gain = float(gain)
    if gain != 1:
        x = x * gain

    # Clamp.
    if clamp >= 0:
        x = x.clamp(-clamp, clamp)  # pylint: disable=invalid-unary-operand-type
    return x

# ----------------------------------------------------------------------------


_bias_act_cuda_cache = dict()


def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Fast CUDA implementation of `bias_act()` using custom ops.
    """
    # Parse arguments.
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)

    # Lookup from cache.
    key = (dim, act, alpha, gain, clamp)
    if key in _bias_act_cuda_cache:
        return _bias_act_cuda_cache[key]

    # Forward op.
    class BiasActCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, b):  # pylint: disable=arguments-differ
            ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format
            x = x.contiguous(memory_format=ctx.memory_format)
            b = b.contiguous() if b is not None else _null_tensor
            y = x
            if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
                y = _plugin.bias_act(x, b, _null_tensor, _null_tensor,
                                     _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward(
                x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                y if 'y' in spec.ref else _null_tensor)
            return y

        @staticmethod
        def backward(ctx, dy):  # pylint: disable=arguments-differ
            dy = dy.contiguous(memory_format=ctx.memory_format)
            x, b, y = ctx.saved_tensors
            dx = None
            db = None

            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                dx = dy
                if act != 'linear' or gain != 1 or clamp >= 0:
                    dx = BiasActCudaGrad.apply(dy, x, b, y)

            if ctx.needs_input_grad[1]:
                db = dx.sum([i for i in range(dx.ndim) if i != dim])

            return dx, db

    # Backward op.
    class BiasActCudaGrad(torch.autograd.Function):
        @staticmethod
        def forward(ctx, dy, x, b, y):  # pylint: disable=arguments-differ
            ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format
            dx = _plugin.bias_act(dy, b, x, y, _null_tensor,
                                  1, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward(
                dy if spec.has_2nd_grad else _null_tensor,
                x, b, y)
            return dx

        @staticmethod
        def backward(ctx, d_dx):  # pylint: disable=arguments-differ
            d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
            dy, x, b, y = ctx.saved_tensors
            d_dy = None
            d_x = None
            d_b = None
            d_y = None

            if ctx.needs_input_grad[0]:
                d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)

            if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
                d_x = _plugin.bias_act(
                    d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)

            if spec.has_2nd_grad and ctx.needs_input_grad[2]:
                d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])

            return d_dy, d_x, d_b, d_y

    # Add to cache.
    _bias_act_cuda_cache[key] = BiasActCuda
    return BiasActCuda

# ----------------------------------------------------------------------------
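When the plugin is unavailable, bias_act() falls back to _bias_act_ref, which is just bias -> activation -> gain -> clamp composed from stock PyTorch ops. A minimal sketch of that pipeline written out by hand for the 'lrelu' entry (def_alpha=0.2, def_gain=sqrt(2); the clamp value here is an arbitrary example, not a default of this module):

import numpy as np
import torch

x = torch.randn(4, 8)
b = torch.randn(8)

y = x + b.reshape(1, -1)                    # add bias along dim=1
y = torch.nn.functional.leaky_relu(y, 0.2)  # 'lrelu' with def_alpha=0.2
y = y * np.sqrt(2)                          # scale by def_gain
y = y.clamp(-256, 256)                      # optional output clamp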
spaces/Amrrs/DragGan-Inversion/torch_utils/ops/bias_act.cpp
DELETED
@@ -1,99 +0,0 @@
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.

#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include "bias_act.h"

//------------------------------------------------------------------------

static bool has_same_layout(torch::Tensor x, torch::Tensor y)
{
    if (x.dim() != y.dim())
        return false;
    for (int64_t i = 0; i < x.dim(); i++)
    {
        if (x.size(i) != y.size(i))
            return false;
        if (x.size(i) >= 2 && x.stride(i) != y.stride(i))
            return false;
    }
    return true;
}

//------------------------------------------------------------------------

static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp)
{
    // Validate arguments.
    TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
    TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x");
    TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x");
    TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x");
    TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x");
    TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
    TORCH_CHECK(b.dim() == 1, "b must have rank 1");
    TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds");
    TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements");
    TORCH_CHECK(grad >= 0, "grad must be non-negative");

    // Validate layout.
    TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense");
    TORCH_CHECK(b.is_contiguous(), "b must be contiguous");
    TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x");
    TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x");
    TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x");

    // Create output tensor.
    const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
    torch::Tensor y = torch::empty_like(x);
    TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x");

    // Initialize CUDA kernel parameters.
    bias_act_kernel_params p;
    p.x     = x.data_ptr();
    p.b     = (b.numel()) ? b.data_ptr() : NULL;
    p.xref  = (xref.numel()) ? xref.data_ptr() : NULL;
    p.yref  = (yref.numel()) ? yref.data_ptr() : NULL;
    p.dy    = (dy.numel()) ? dy.data_ptr() : NULL;
    p.y     = y.data_ptr();
    p.grad  = grad;
    p.act   = act;
    p.alpha = alpha;
    p.gain  = gain;
    p.clamp = clamp;
    p.sizeX = (int)x.numel();
    p.sizeB = (int)b.numel();
    p.stepB = (b.numel()) ? (int)x.stride(dim) : 1;

    // Choose CUDA kernel.
    void* kernel;
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&]
    {
        kernel = choose_bias_act_kernel<scalar_t>(p);
    });
    TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func");

    // Launch CUDA kernel.
    p.loopX = 4;
    int blockSize = 4 * 32;
    int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
    void* args[] = {&p};
    AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
    return y;
}

//------------------------------------------------------------------------

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("bias_act", &bias_act);
}

//------------------------------------------------------------------------
spaces/Amrrs/DragGan-Inversion/torch_utils/ops/fma.py
DELETED
@@ -1,64 +0,0 @@
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`."""

import torch

# ----------------------------------------------------------------------------


def fma(a, b, c):  # => a * b + c
    return _FusedMultiplyAdd.apply(a, b, c)

# ----------------------------------------------------------------------------


class _FusedMultiplyAdd(torch.autograd.Function):  # a * b + c
    @staticmethod
    def forward(ctx, a, b, c):  # pylint: disable=arguments-differ
        out = torch.addcmul(c, a, b)
        ctx.save_for_backward(a, b)
        ctx.c_shape = c.shape
        return out

    @staticmethod
    def backward(ctx, dout):  # pylint: disable=arguments-differ
        a, b = ctx.saved_tensors
        c_shape = ctx.c_shape
        da = None
        db = None
        dc = None

        if ctx.needs_input_grad[0]:
            da = _unbroadcast(dout * b, a.shape)

        if ctx.needs_input_grad[1]:
            db = _unbroadcast(dout * a, b.shape)

        if ctx.needs_input_grad[2]:
            dc = _unbroadcast(dout, c_shape)

        return da, db, dc

# ----------------------------------------------------------------------------


def _unbroadcast(x, shape):
    extra_dims = x.ndim - len(shape)
    assert extra_dims >= 0
    dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)]
    if len(dim):
        x = x.sum(dim=dim, keepdim=True)
    if extra_dims:
        x = x.reshape(-1, *x.shape[extra_dims+1:])
    assert x.shape == shape
    return x

# ----------------------------------------------------------------------------
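fma(a, b, c) computes torch.addcmul(c, a, b) in the forward pass; the custom backward exists so that broadcast inputs get their gradients reduced back to their own shapes, which is what _unbroadcast implements via sum + reshape. A small sketch of the broadcasting behavior using plain autograd, which produces the same reduced gradient shapes:

import torch

a = torch.randn(3, 1, requires_grad=True)
b = torch.randn(1, 4, requires_grad=True)
c = torch.randn(3, 4, requires_grad=True)

out = torch.addcmul(c, a, b)          # same value as a * b + c
assert torch.allclose(out, a * b + c)

out.sum().backward()
print(a.grad.shape, b.grad.shape, c.grad.shape)  # torch.Size([3, 1]) torch.Size([1, 4]) torch.Size([3, 4])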
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py
DELETED
@@ -1,537 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from functools import partial
from typing import Dict, List, Optional, Union

import jax
import jax.numpy as jnp
import numpy as np
from flax.core.frozen_dict import FrozenDict
from flax.jax_utils import unreplicate
from flax.training.common_utils import shard
from PIL import Image
from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel

from ...models import FlaxAutoencoderKL, FlaxControlNetModel, FlaxUNet2DConditionModel
from ...schedulers import (
    FlaxDDIMScheduler,
    FlaxDPMSolverMultistepScheduler,
    FlaxLMSDiscreteScheduler,
    FlaxPNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring
from ..pipeline_flax_utils import FlaxDiffusionPipeline
from ..stable_diffusion import FlaxStableDiffusionPipelineOutput
from ..stable_diffusion.safety_checker_flax import FlaxStableDiffusionSafetyChecker


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Set to True to use python for loop instead of jax.fori_loop for easier debugging
DEBUG = False

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import jax
        >>> import numpy as np
        >>> import jax.numpy as jnp
        >>> from flax.jax_utils import replicate
        >>> from flax.training.common_utils import shard
        >>> from diffusers.utils import load_image
        >>> from PIL import Image
        >>> from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel


        >>> def image_grid(imgs, rows, cols):
        ...     w, h = imgs[0].size
        ...     grid = Image.new("RGB", size=(cols * w, rows * h))
        ...     for i, img in enumerate(imgs):
        ...         grid.paste(img, box=(i % cols * w, i // cols * h))
        ...     return grid


        >>> def create_key(seed=0):
        ...     return jax.random.PRNGKey(seed)


        >>> rng = create_key(0)

        >>> # get canny image
        >>> canny_image = load_image(
        ...     "https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_10_output_0.jpeg"
        ... )

        >>> prompts = "best quality, extremely detailed"
        >>> negative_prompts = "monochrome, lowres, bad anatomy, worst quality, low quality"

        >>> # load control net and stable diffusion v1-5
        >>> controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
        ...     "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
        ... )
        >>> pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
        ...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32
        ... )
        >>> params["controlnet"] = controlnet_params

        >>> num_samples = jax.device_count()
        >>> rng = jax.random.split(rng, jax.device_count())

        >>> prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        >>> negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples)
        >>> processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        >>> p_params = replicate(params)
        >>> prompt_ids = shard(prompt_ids)
        >>> negative_prompt_ids = shard(negative_prompt_ids)
        >>> processed_image = shard(processed_image)

        >>> output = pipe(
        ...     prompt_ids=prompt_ids,
        ...     image=processed_image,
        ...     params=p_params,
        ...     prng_seed=rng,
        ...     num_inference_steps=50,
        ...     neg_prompt_ids=negative_prompt_ids,
        ...     jit=True,
        ... ).images

        >>> output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))
        >>> output_images = image_grid(output_images, num_samples // 4, 4)
        >>> output_images.save("generated_image.png")
        ```
"""


class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion with ControlNet Guidance.

    This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`FlaxAutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`FlaxCLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.FlaxCLIPTextModel),
            specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`FlaxUNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        controlnet ([`FlaxControlNetModel`]:
            Provides additional conditioning to the unet during the denoising process.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or
            [`FlaxDPMSolverMultistepScheduler`].
        safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    def __init__(
        self,
        vae: FlaxAutoencoderKL,
        text_encoder: FlaxCLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: FlaxUNet2DConditionModel,
        controlnet: FlaxControlNetModel,
        scheduler: Union[
            FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler
        ],
        safety_checker: FlaxStableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        dtype: jnp.dtype = jnp.float32,
    ):
        super().__init__()
        self.dtype = dtype

        if safety_checker is None:
            logger.warn(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            controlnet=controlnet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)

    def prepare_text_inputs(self, prompt: Union[str, List[str]]):
        if not isinstance(prompt, (str, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )

        return text_input.input_ids

    def prepare_image_inputs(self, image: Union[Image.Image, List[Image.Image]]):
        if not isinstance(image, (Image.Image, list)):
            raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}")

        if isinstance(image, Image.Image):
            image = [image]

        processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image])

        return processed_images

    def _get_has_nsfw_concepts(self, features, params):
        has_nsfw_concepts = self.safety_checker(features, params)
        return has_nsfw_concepts

    def _run_safety_checker(self, images, safety_model_params, jit=False):
        # safety_model_params should already be replicated when jit is True
        pil_images = [Image.fromarray(image) for image in images]
        features = self.feature_extractor(pil_images, return_tensors="np").pixel_values

        if jit:
            features = shard(features)
            has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params)
            has_nsfw_concepts = unshard(has_nsfw_concepts)
            safety_model_params = unreplicate(safety_model_params)
        else:
            has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params)

        images_was_copied = False
        for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
            if has_nsfw_concept:
                if not images_was_copied:
                    images_was_copied = True
                    images = images.copy()

                images[idx] = np.zeros(images[idx].shape, dtype=np.uint8)  # black image

        if any(has_nsfw_concepts):
            warnings.warn(
                "Potential NSFW content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )

        return images, has_nsfw_concepts

    def _generate(
        self,
        prompt_ids: jnp.array,
        image: jnp.array,
        params: Union[Dict, FrozenDict],
        prng_seed: jax.random.KeyArray,
        num_inference_steps: int,
        guidance_scale: float,
        latents: Optional[jnp.array] = None,
        neg_prompt_ids: Optional[jnp.array] = None,
        controlnet_conditioning_scale: float = 1.0,
    ):
        height, width = image.shape[-2:]
        if height % 64 != 0 or width % 64 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 64 but are {height} and {width}.")

        # get prompt text embeddings
        prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]

        # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
        # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0`
        batch_size = prompt_ids.shape[0]

        max_length = prompt_ids.shape[-1]

        if neg_prompt_ids is None:
            uncond_input = self.tokenizer(
                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np"
            ).input_ids
        else:
            uncond_input = neg_prompt_ids
        negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0]
        context = jnp.concatenate([negative_prompt_embeds, prompt_embeds])

        image = jnp.concatenate([image] * 2)
|
283 |
-
|
284 |
-
latents_shape = (
|
285 |
-
batch_size,
|
286 |
-
self.unet.config.in_channels,
|
287 |
-
height // self.vae_scale_factor,
|
288 |
-
width // self.vae_scale_factor,
|
289 |
-
)
|
290 |
-
if latents is None:
|
291 |
-
latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32)
|
292 |
-
else:
|
293 |
-
if latents.shape != latents_shape:
|
294 |
-
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
|
295 |
-
|
296 |
-
def loop_body(step, args):
|
297 |
-
latents, scheduler_state = args
|
298 |
-
# For classifier free guidance, we need to do two forward passes.
|
299 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
300 |
-
# to avoid doing two forward passes
|
301 |
-
latents_input = jnp.concatenate([latents] * 2)
|
302 |
-
|
303 |
-
t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
|
304 |
-
timestep = jnp.broadcast_to(t, latents_input.shape[0])
|
305 |
-
|
306 |
-
latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t)
|
307 |
-
|
308 |
-
down_block_res_samples, mid_block_res_sample = self.controlnet.apply(
|
309 |
-
{"params": params["controlnet"]},
|
310 |
-
jnp.array(latents_input),
|
311 |
-
jnp.array(timestep, dtype=jnp.int32),
|
312 |
-
encoder_hidden_states=context,
|
313 |
-
controlnet_cond=image,
|
314 |
-
conditioning_scale=controlnet_conditioning_scale,
|
315 |
-
return_dict=False,
|
316 |
-
)
|
317 |
-
|
318 |
-
# predict the noise residual
|
319 |
-
noise_pred = self.unet.apply(
|
320 |
-
{"params": params["unet"]},
|
321 |
-
jnp.array(latents_input),
|
322 |
-
jnp.array(timestep, dtype=jnp.int32),
|
323 |
-
encoder_hidden_states=context,
|
324 |
-
down_block_additional_residuals=down_block_res_samples,
|
325 |
-
mid_block_additional_residual=mid_block_res_sample,
|
326 |
-
).sample
|
327 |
-
|
328 |
-
# perform guidance
|
329 |
-
noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0)
|
330 |
-
noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
|
331 |
-
|
332 |
-
# compute the previous noisy sample x_t -> x_t-1
|
333 |
-
latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple()
|
334 |
-
return latents, scheduler_state
|
335 |
-
|
336 |
-
scheduler_state = self.scheduler.set_timesteps(
|
337 |
-
params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape
|
338 |
-
)
|
339 |
-
|
340 |
-
# scale the initial noise by the standard deviation required by the scheduler
|
341 |
-
latents = latents * params["scheduler"].init_noise_sigma
|
342 |
-
|
343 |
-
if DEBUG:
|
344 |
-
# run with python for loop
|
345 |
-
for i in range(num_inference_steps):
|
346 |
-
latents, scheduler_state = loop_body(i, (latents, scheduler_state))
|
347 |
-
else:
|
348 |
-
latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state))
|
349 |
-
|
350 |
-
# scale and decode the image latents with vae
|
351 |
-
latents = 1 / self.vae.config.scaling_factor * latents
|
352 |
-
image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample
|
353 |
-
|
354 |
-
image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
|
355 |
-
return image
|
356 |
-
|
357 |
-
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
358 |
-
def __call__(
|
359 |
-
self,
|
360 |
-
prompt_ids: jnp.array,
|
361 |
-
image: jnp.array,
|
362 |
-
params: Union[Dict, FrozenDict],
|
363 |
-
prng_seed: jax.random.KeyArray,
|
364 |
-
num_inference_steps: int = 50,
|
365 |
-
guidance_scale: Union[float, jnp.array] = 7.5,
|
366 |
-
latents: jnp.array = None,
|
367 |
-
neg_prompt_ids: jnp.array = None,
|
368 |
-
controlnet_conditioning_scale: Union[float, jnp.array] = 1.0,
|
369 |
-
return_dict: bool = True,
|
370 |
-
jit: bool = False,
|
371 |
-
):
|
372 |
-
r"""
|
373 |
-
Function invoked when calling the pipeline for generation.
|
374 |
-
|
375 |
-
Args:
|
376 |
-
prompt_ids (`jnp.array`):
|
377 |
-
The prompt or prompts to guide the image generation.
|
378 |
-
image (`jnp.array`):
|
379 |
-
Array representing the ControlNet input condition. ControlNet use this input condition to generate
|
380 |
-
guidance to Unet.
|
381 |
-
params (`Dict` or `FrozenDict`): Dictionary containing the model parameters/weights
|
382 |
-
prng_seed (`jax.random.KeyArray` or `jax.Array`): Array containing random number generator key
|
383 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
384 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
385 |
-
expense of slower inference.
|
386 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
387 |
-
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
388 |
-
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
389 |
-
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
390 |
-
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
391 |
-
usually at the expense of lower image quality.
|
392 |
-
latents (`jnp.array`, *optional*):
|
393 |
-
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
394 |
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
395 |
-
tensor will ge generated by sampling using the supplied random `generator`.
|
396 |
-
controlnet_conditioning_scale (`float` or `jnp.array`, *optional*, defaults to 1.0):
|
397 |
-
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
|
398 |
-
to the residual in the original unet.
|
399 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
400 |
-
Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of
|
401 |
-
a plain tuple.
|
402 |
-
jit (`bool`, defaults to `False`):
|
403 |
-
Whether to run `pmap` versions of the generation and safety scoring functions. NOTE: This argument
|
404 |
-
exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a future release.
|
405 |
-
|
406 |
-
Examples:
|
407 |
-
|
408 |
-
Returns:
|
409 |
-
[`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`:
|
410 |
-
[`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
|
411 |
-
`tuple. When returning a tuple, the first element is a list with the generated images, and the second
|
412 |
-
element is a list of `bool`s denoting whether the corresponding generated image likely represents
|
413 |
-
"not-safe-for-work" (nsfw) content, according to the `safety_checker`.
|
414 |
-
"""
|
415 |
-
|
416 |
-
height, width = image.shape[-2:]
|
417 |
-
|
418 |
-
if isinstance(guidance_scale, float):
|
419 |
-
# Convert to a tensor so each device gets a copy. Follow the prompt_ids for
|
420 |
-
# shape information, as they may be sharded (when `jit` is `True`), or not.
|
421 |
-
guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0])
|
422 |
-
if len(prompt_ids.shape) > 2:
|
423 |
-
# Assume sharded
|
424 |
-
guidance_scale = guidance_scale[:, None]
|
425 |
-
|
426 |
-
if isinstance(controlnet_conditioning_scale, float):
|
427 |
-
# Convert to a tensor so each device gets a copy. Follow the prompt_ids for
|
428 |
-
# shape information, as they may be sharded (when `jit` is `True`), or not.
|
429 |
-
controlnet_conditioning_scale = jnp.array([controlnet_conditioning_scale] * prompt_ids.shape[0])
|
430 |
-
if len(prompt_ids.shape) > 2:
|
431 |
-
# Assume sharded
|
432 |
-
controlnet_conditioning_scale = controlnet_conditioning_scale[:, None]
|
433 |
-
|
434 |
-
if jit:
|
435 |
-
images = _p_generate(
|
436 |
-
self,
|
437 |
-
prompt_ids,
|
438 |
-
image,
|
439 |
-
params,
|
440 |
-
prng_seed,
|
441 |
-
num_inference_steps,
|
442 |
-
guidance_scale,
|
443 |
-
latents,
|
444 |
-
neg_prompt_ids,
|
445 |
-
controlnet_conditioning_scale,
|
446 |
-
)
|
447 |
-
else:
|
448 |
-
images = self._generate(
|
449 |
-
prompt_ids,
|
450 |
-
image,
|
451 |
-
params,
|
452 |
-
prng_seed,
|
453 |
-
num_inference_steps,
|
454 |
-
guidance_scale,
|
455 |
-
latents,
|
456 |
-
neg_prompt_ids,
|
457 |
-
controlnet_conditioning_scale,
|
458 |
-
)
|
459 |
-
|
460 |
-
if self.safety_checker is not None:
|
461 |
-
safety_params = params["safety_checker"]
|
462 |
-
images_uint8_casted = (images * 255).round().astype("uint8")
|
463 |
-
num_devices, batch_size = images.shape[:2]
|
464 |
-
|
465 |
-
images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3)
|
466 |
-
images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit)
|
467 |
-
images = np.array(images)
|
468 |
-
|
469 |
-
# block images
|
470 |
-
if any(has_nsfw_concept):
|
471 |
-
for i, is_nsfw in enumerate(has_nsfw_concept):
|
472 |
-
if is_nsfw:
|
473 |
-
images[i] = np.asarray(images_uint8_casted[i])
|
474 |
-
|
475 |
-
images = images.reshape(num_devices, batch_size, height, width, 3)
|
476 |
-
else:
|
477 |
-
images = np.asarray(images)
|
478 |
-
has_nsfw_concept = False
|
479 |
-
|
480 |
-
if not return_dict:
|
481 |
-
return (images, has_nsfw_concept)
|
482 |
-
|
483 |
-
return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
|
484 |
-
|
485 |
-
|
486 |
-
# Static argnums are pipe, num_inference_steps. A change would trigger recompilation.
|
487 |
-
# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`).
|
488 |
-
@partial(
|
489 |
-
jax.pmap,
|
490 |
-
in_axes=(None, 0, 0, 0, 0, None, 0, 0, 0, 0),
|
491 |
-
static_broadcasted_argnums=(0, 5),
|
492 |
-
)
|
493 |
-
def _p_generate(
|
494 |
-
pipe,
|
495 |
-
prompt_ids,
|
496 |
-
image,
|
497 |
-
params,
|
498 |
-
prng_seed,
|
499 |
-
num_inference_steps,
|
500 |
-
guidance_scale,
|
501 |
-
latents,
|
502 |
-
neg_prompt_ids,
|
503 |
-
controlnet_conditioning_scale,
|
504 |
-
):
|
505 |
-
return pipe._generate(
|
506 |
-
prompt_ids,
|
507 |
-
image,
|
508 |
-
params,
|
509 |
-
prng_seed,
|
510 |
-
num_inference_steps,
|
511 |
-
guidance_scale,
|
512 |
-
latents,
|
513 |
-
neg_prompt_ids,
|
514 |
-
controlnet_conditioning_scale,
|
515 |
-
)
|
516 |
-
|
517 |
-
|
518 |
-
@partial(jax.pmap, static_broadcasted_argnums=(0,))
|
519 |
-
def _p_get_has_nsfw_concepts(pipe, features, params):
|
520 |
-
return pipe._get_has_nsfw_concepts(features, params)
|
521 |
-
|
522 |
-
|
523 |
-
def unshard(x: jnp.ndarray):
|
524 |
-
# einops.rearrange(x, 'd b ... -> (d b) ...')
|
525 |
-
num_devices, batch_size = x.shape[:2]
|
526 |
-
rest = x.shape[2:]
|
527 |
-
return x.reshape(num_devices * batch_size, *rest)
|
528 |
-
|
529 |
-
|
530 |
-
def preprocess(image, dtype):
|
531 |
-
image = image.convert("RGB")
|
532 |
-
w, h = image.size
|
533 |
-
w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64
|
534 |
-
image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
|
535 |
-
image = jnp.array(image).astype(dtype) / 255.0
|
536 |
-
image = image[None].transpose(0, 3, 1, 2)
|
537 |
-
return image
|
|
|
|
|
|
|
|
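A minimal usage sketch for the deleted pipeline above, hedged: it assumes the public checkpoints lllyasviel/sd-controlnet-canny and runwayml/stable-diffusion-v1-5 are reachable, and "canny.png" is a hypothetical local conditioning image. `prepare_text_inputs` and `prepare_image_inputs` are the helper methods defined in the file; `replicate` and `shard` come from Flax and mirror the `jit=True` contract documented in `__call__`.

import jax
import jax.numpy as jnp
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import load_image
from flax.jax_utils import replicate
from flax.training.common_utils import shard

# Load a ControlNet and the pipeline it plugs into (checkpoints assumed above).
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", dtype=jnp.float32
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, dtype=jnp.float32
)
params["controlnet"] = controlnet_params

# One prompt/image per device, tokenized and normalized by the pipeline helpers.
num_devices = jax.device_count()
canny_image = load_image("canny.png")  # hypothetical conditioning image
prompt_ids = pipe.prepare_text_inputs(["best quality, extremely detailed"] * num_devices)
processed_image = pipe.prepare_image_inputs([canny_image] * num_devices)

# Replicate parameters, shard inputs, and run the pmapped path (jit=True).
rng = jax.random.split(jax.random.PRNGKey(0), num_devices)
output = pipe(
    prompt_ids=shard(prompt_ids),
    image=shard(processed_image),
    params=replicate(params),
    prng_seed=rng,
    num_inference_steps=50,
    jit=True,
)
images = output.images  # (num_devices, batch_per_device, height, width, 3)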
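The guidance update inside `loop_body` is plain arithmetic on the two halves of the doubled batch; the same step in isolation, with toy numbers:

import jax.numpy as jnp

# Doubled batch: row 0 is the unconditional prediction, row 1 the text-conditioned one.
noise_pred = jnp.array([[0.1, 0.2], [0.5, 0.8]])
uncond, text = jnp.split(noise_pred, 2, axis=0)
guided = uncond + 7.5 * (text - uncond)  # guidance_scale = 7.5
# guided == [[3.1, 4.7]]: the output is extrapolated away from the unconditional prediction.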
spaces/Andy1621/uniformer_image_detection/mmdet/models/utils/gaussian_target.py
DELETED
@@ -1,185 +0,0 @@
-from math import sqrt
-
-import torch
-
-
-def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'):
-    """Generate 2D gaussian kernel.
-
-    Args:
-        radius (int): Radius of gaussian kernel.
-        sigma (int): Sigma of gaussian function. Default: 1.
-        dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32.
-        device (str): Device of gaussian tensor. Default: 'cpu'.
-
-    Returns:
-        h (Tensor): Gaussian kernel with a
-            ``(2 * radius + 1) * (2 * radius + 1)`` shape.
-    """
-    x = torch.arange(
-        -radius, radius + 1, dtype=dtype, device=device).view(1, -1)
-    y = torch.arange(
-        -radius, radius + 1, dtype=dtype, device=device).view(-1, 1)
-
-    h = (-(x * x + y * y) / (2 * sigma * sigma)).exp()
-
-    h[h < torch.finfo(h.dtype).eps * h.max()] = 0
-    return h
-
-
-def gen_gaussian_target(heatmap, center, radius, k=1):
-    """Generate 2D gaussian heatmap.
-
-    Args:
-        heatmap (Tensor): Input heatmap, the gaussian kernel will cover on
-            it and maintain the max value.
-        center (list[int]): Coord of gaussian kernel's center.
-        radius (int): Radius of gaussian kernel.
-        k (int): Coefficient of gaussian kernel. Default: 1.
-
-    Returns:
-        out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.
-    """
-    diameter = 2 * radius + 1
-    gaussian_kernel = gaussian2D(
-        radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device)
-
-    x, y = center
-
-    height, width = heatmap.shape[:2]
-
-    left, right = min(x, radius), min(width - x, radius + 1)
-    top, bottom = min(y, radius), min(height - y, radius + 1)
-
-    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
-    masked_gaussian = gaussian_kernel[radius - top:radius + bottom,
-                                      radius - left:radius + right]
-    out_heatmap = heatmap
-    torch.max(
-        masked_heatmap,
-        masked_gaussian * k,
-        out=out_heatmap[y - top:y + bottom, x - left:x + right])
-
-    return out_heatmap
-
-
-def gaussian_radius(det_size, min_overlap):
-    r"""Generate 2D gaussian radius.
-
-    This function is modified from the `official github repo
-    <https://github.com/princeton-vl/CornerNet-Lite/blob/master/core/sample/
-    utils.py#L65>`_.
-
-    Given ``min_overlap``, the radius can be computed by a quadratic equation
-    according to Vieta's formulas.
-
-    There are 3 cases for computing gaussian radius, details are following:
-
-    - Explanation of figure: ``lt`` and ``br`` indicates the left-top and
-      bottom-right corner of ground truth box. ``x`` indicates the
-      generated corner at the limited position when ``radius=r``.
-
-    - Case1: one corner is inside the gt box and the other is outside.
-
-    .. code:: text
-
-        |<   width   >|
-
-        lt-+----------+         -
-        |  |          |         ^
-        +--x----------+--+
-        |  |          |  |
-        |  |          |  |    height
-        |  | overlap  |  |
-        |  |          |  |
-        |  |          |  |      v
-        +--+---------br--+      -
-           |          |  |
-           +----------+--x
-
-    To ensure IoU of generated box and gt box is larger than ``min_overlap``:
-
-    .. math::
-        \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad
-        {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\
-        {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} \\
-        {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
-
-    - Case2: both two corners are inside the gt box.
-
-    .. code:: text
-
-        |<   width   >|
-
-        lt-+----------+       -
-        |  |          |       ^
-        +--x-------+  |
-        |  |       |  |
-        |  |overlap|  |     height
-        |  |       |  |
-        |  +-------x--+
-        |          |  |       v
-        +----------+-br       -
-
-    To ensure IoU of generated box and gt box is larger than ``min_overlap``:
-
-    .. math::
-        \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad
-        {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\
-        {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} \\
-        {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
-
-    - Case3: both two corners are outside the gt box.
-
-    .. code:: text
-
-        |<   width   >|
-
-        x--+----------------+
-        |  |                |
-        +-lt-------------+  |   -
-        |  |             |  |   ^
-        |  |             |  |
-        |  |   overlap   |  | height
-        |  |             |  |
-        |  |             |  |   v
-        |  +------------br--+   -
-        |  |                |
-        +----------------+--x
-
-    To ensure IoU of generated box and gt box is larger than ``min_overlap``:
-
-    .. math::
-        \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad
-        {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\
-        {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\
-        {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a}
-
-    Args:
-        det_size (list[int]): Shape of object.
-        min_overlap (float): Min IoU with ground truth for boxes generated by
-            keypoints inside the gaussian kernel.
-
-    Returns:
-        radius (int): Radius of gaussian kernel.
-    """
-    height, width = det_size
-
-    a1 = 1
-    b1 = (height + width)
-    c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
-    sq1 = sqrt(b1**2 - 4 * a1 * c1)
-    r1 = (b1 - sq1) / (2 * a1)
-
-    a2 = 4
-    b2 = 2 * (height + width)
-    c2 = (1 - min_overlap) * width * height
-    sq2 = sqrt(b2**2 - 4 * a2 * c2)
-    r2 = (b2 - sq2) / (2 * a2)
-
-    a3 = 4 * min_overlap
-    b3 = -2 * min_overlap * (height + width)
-    c3 = (min_overlap - 1) * width * height
-    sq3 = sqrt(b3**2 - 4 * a3 * c3)
-    r3 = (b3 + sq3) / (2 * a3)
-    return min(r1, r2, r3)
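A short usage sketch for the two kernel helpers above, with a hypothetical 128x128 heatmap; note that `center` follows the (x, y) convention of `gen_gaussian_target` while tensors index as (row, col):

import torch

heatmap = torch.zeros((128, 128))
heatmap = gen_gaussian_target(heatmap, center=[64, 48], radius=6)
assert heatmap[48, 64] == 1.0  # the kernel peak lands at (row=y, col=x)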
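And the radius helper that feeds it, as typically used in CornerNet-style target assignment (the box size here is purely illustrative):

# Radius guaranteeing at least 0.7 IoU for a (height, width) = (30, 40) box.
r = gaussian_radius((30, 40), min_overlap=0.7)
radius = max(0, int(r))  # callers usually floor and clamp the float radius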
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/pascal_context.py
DELETED
@@ -1,103 +0,0 @@
-import os.path as osp
-
-from .builder import DATASETS
-from .custom import CustomDataset
-
-
-@DATASETS.register_module()
-class PascalContextDataset(CustomDataset):
-    """PascalContext dataset.
-
-    In segmentation map annotation for PascalContext, 0 stands for background,
-    which is included in 60 categories. ``reduce_zero_label`` is fixed to
-    False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
-    fixed to '.png'.
-
-    Args:
-        split (str): Split txt file for PascalContext.
-    """
-
-    CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench',
-               'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus',
-               'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth',
-               'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence',
-               'floor', 'flower', 'food', 'grass', 'ground', 'horse',
-               'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person',
-               'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep',
-               'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table',
-               'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water',
-               'window', 'wood')
-
-    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
-               [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
-               [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
-               [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
-               [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
-               [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
-               [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
-               [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
-               [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
-               [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
-               [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
-               [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
-               [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
-               [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
-               [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]
-
-    def __init__(self, split, **kwargs):
-        super(PascalContextDataset, self).__init__(
-            img_suffix='.jpg',
-            seg_map_suffix='.png',
-            split=split,
-            reduce_zero_label=False,
-            **kwargs)
-        assert osp.exists(self.img_dir) and self.split is not None
-
-
-@DATASETS.register_module()
-class PascalContextDataset59(CustomDataset):
-    """PascalContext dataset with 59 classes.
-
-    In segmentation map annotation for PascalContext59, background is not
-    included in the 59 categories, so ``reduce_zero_label`` is fixed to
-    True. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
-    fixed to '.png'.
-
-    Args:
-        split (str): Split txt file for PascalContext.
-    """
-
-    CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle',
-               'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet',
-               'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow',
-               'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower',
-               'food', 'grass', 'ground', 'horse', 'keyboard', 'light',
-               'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform',
-               'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk',
-               'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train',
-               'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood')
-
-    PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
-               [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230],
-               [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61],
-               [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140],
-               [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200],
-               [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71],
-               [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92],
-               [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6],
-               [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8],
-               [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8],
-               [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255],
-               [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140],
-               [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0],
-               [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0],
-               [0, 235, 255], [0, 173, 255], [31, 0, 255]]
-
-    def __init__(self, split, **kwargs):
-        super(PascalContextDataset59, self).__init__(
-            img_suffix='.jpg',
-            seg_map_suffix='.png',
-            split=split,
-            reduce_zero_label=True,
-            **kwargs)
-        assert osp.exists(self.img_dir) and self.split is not None
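A minimal instantiation sketch with hypothetical paths; in practice the transforms list and directory layout come from an mmseg config:

# Hypothetical data layout under data/VOCdevkit/VOC2010.
dataset = PascalContextDataset(
    split='ImageSets/SegmentationContext/train.txt',
    data_root='data/VOCdevkit/VOC2010',
    img_dir='JPEGImages',
    ann_dir='SegmentationClassContext',
    pipeline=[],  # fill in LoadImageFromFile, LoadAnnotations, etc.
)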
spaces/Arnx/MusicGenXvAKN/audiocraft/utils/notebook.py
DELETED
@@ -1,32 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-try:
-    import IPython.display as ipd  # type: ignore
-except ImportError:
-    # Not in a notebook; the import is only needed when displaying audio.
-    pass
-
-
-import torch
-
-
-def display_audio(samples: torch.Tensor, sample_rate: int):
-    """Renders an audio player for the given audio samples.
-
-    Args:
-        samples (torch.Tensor): a Tensor of decoded audio samples
-            with shapes [B, C, T] or [C, T]
-        sample_rate (int): sample rate audio should be displayed with.
-    """
-    assert samples.dim() == 2 or samples.dim() == 3
-
-    samples = samples.detach().cpu()
-    if samples.dim() == 2:
-        samples = samples[None, ...]
-
-    for audio in samples:
-        ipd.display(ipd.Audio(audio, rate=sample_rate))
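For example, inside a notebook one could render a second of stereo noise (purely illustrative):

import torch

noise = torch.randn(2, 32000)  # [C, T]: two channels, one second at 32 kHz
display_audio(noise, sample_rate=32000)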
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/context.py
DELETED
@@ -1,213 +0,0 @@
-import os
-import subprocess
-import contextlib
-import functools
-import tempfile
-import shutil
-import operator
-
-
-@contextlib.contextmanager
-def pushd(dir):
-    orig = os.getcwd()
-    os.chdir(dir)
-    try:
-        yield dir
-    finally:
-        os.chdir(orig)
-
-
-@contextlib.contextmanager
-def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
-    """
-    Get a tarball, extract it, change to that directory, yield, then
-    clean up.
-    `runner` is the function to invoke commands.
-    `pushd` is a context manager for changing the directory.
-    """
-    if target_dir is None:
-        target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
-    if runner is None:
-        runner = functools.partial(subprocess.check_call, shell=True)
-    # In the tar command, use --strip-components=1 to strip the first path and
-    # then use -C to cause the files to be extracted to {target_dir}. This
-    # ensures that we always know where the files were extracted.
-    runner('mkdir {target_dir}'.format(**vars()))
-    try:
-        getter = 'wget {url} -O -'
-        extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
-        cmd = ' | '.join((getter, extract))
-        runner(cmd.format(compression=infer_compression(url), **vars()))
-        with pushd(target_dir):
-            yield target_dir
-    finally:
-        runner('rm -Rf {target_dir}'.format(**vars()))
-
-
-def infer_compression(url):
-    """
-    Given a URL or filename, infer the compression code for tar.
-    """
-    # cheat and just assume it's the last two characters
-    compression_indicator = url[-2:]
-    mapping = dict(gz='z', bz='j', xz='J')
-    # Assume 'z' (gzip) if no match
-    return mapping.get(compression_indicator, 'z')
-
-
-@contextlib.contextmanager
-def temp_dir(remover=shutil.rmtree):
-    """
-    Create a temporary directory context. Pass a custom remover
-    to override the removal behavior.
-    """
-    temp_dir = tempfile.mkdtemp()
-    try:
-        yield temp_dir
-    finally:
-        remover(temp_dir)
-
-
-@contextlib.contextmanager
-def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
-    """
-    Check out the repo indicated by url.
-
-    If dest_ctx is supplied, it should be a context manager
-    to yield the target directory for the check out.
-    """
-    exe = 'git' if 'git' in url else 'hg'
-    with dest_ctx() as repo_dir:
-        cmd = [exe, 'clone', url, repo_dir]
-        if branch:
-            cmd.extend(['--branch', branch])
-        devnull = open(os.path.devnull, 'w')
-        stdout = devnull if quiet else None
-        subprocess.check_call(cmd, stdout=stdout)
-        yield repo_dir
-
-
-@contextlib.contextmanager
-def null():
-    yield
-
-
-class ExceptionTrap:
-    """
-    A context manager that will catch certain exceptions and provide an
-    indication they occurred.
-
-    >>> with ExceptionTrap() as trap:
-    ...     raise Exception()
-    >>> bool(trap)
-    True
-
-    >>> with ExceptionTrap() as trap:
-    ...     pass
-    >>> bool(trap)
-    False
-
-    >>> with ExceptionTrap(ValueError) as trap:
-    ...     raise ValueError("1 + 1 is not 3")
-    >>> bool(trap)
-    True
-
-    >>> with ExceptionTrap(ValueError) as trap:
-    ...     raise Exception()
-    Traceback (most recent call last):
-    ...
-    Exception
-
-    >>> bool(trap)
-    False
-    """
-
-    exc_info = None, None, None
-
-    def __init__(self, exceptions=(Exception,)):
-        self.exceptions = exceptions
-
-    def __enter__(self):
-        return self
-
-    @property
-    def type(self):
-        return self.exc_info[0]
-
-    @property
-    def value(self):
-        return self.exc_info[1]
-
-    @property
-    def tb(self):
-        return self.exc_info[2]
-
-    def __exit__(self, *exc_info):
-        type = exc_info[0]
-        matches = type and issubclass(type, self.exceptions)
-        if matches:
-            self.exc_info = exc_info
-        return matches
-
-    def __bool__(self):
-        return bool(self.type)
-
-    def raises(self, func, *, _test=bool):
-        """
-        Wrap func and replace the result with the truth
-        value of the trap (True if an exception occurred).
-
-        First, give the decorator an alias to support Python 3.8
-        Syntax.
-
-        >>> raises = ExceptionTrap(ValueError).raises
-
-        Now decorate a function that always fails.
-
-        >>> @raises
-        ... def fail():
-        ...     raise ValueError('failed')
-        >>> fail()
-        True
-        """
-
-        @functools.wraps(func)
-        def wrapper(*args, **kwargs):
-            with ExceptionTrap(self.exceptions) as trap:
-                func(*args, **kwargs)
-            return _test(trap)
-
-        return wrapper
-
-    def passes(self, func):
-        """
-        Wrap func and replace the result with the truth
-        value of the trap (True if no exception).
-
-        First, give the decorator an alias to support Python 3.8
-        Syntax.
-
-        >>> passes = ExceptionTrap(ValueError).passes
-
-        Now decorate a function that always fails.
-
-        >>> @passes
-        ... def fail():
-        ...     raise ValueError('failed')
-
-        >>> fail()
-        False
-        """
-        return self.raises(func, _test=operator.not_)
-
-
-class suppress(contextlib.suppress, contextlib.ContextDecorator):
-    """
-    A version of contextlib.suppress with decorator support.
-
-    >>> @suppress(KeyError)
-    ... def key_error():
-    ...     {}['']
-    >>> key_error()
-    """
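The small context managers in this module compose naturally; a sketch:

# Work inside a scratch directory that is removed on exit.
with temp_dir() as scratch:
    with pushd(scratch):
        with open('notes.txt', 'w') as f:
            f.write('temporary work')
# scratch (and notes.txt) no longer exists here.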
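And `ExceptionTrap.raises` turns a callable into a boolean probe, which is handy for validation helpers:

# True means the wrapped call raised a ValueError; False means it succeeded.
raises_value_error = ExceptionTrap(ValueError).raises
fails_int_parse = raises_value_error(int)
assert fails_int_parse('nope') is True
assert fails_int_parse('42') is False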
spaces/Audio-AGI/AudioSep/models/resunet.py
DELETED
@@ -1,715 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
from typing import Dict, List, NoReturn, Tuple
|
3 |
-
import torch
|
4 |
-
import torch.nn as nn
|
5 |
-
import torch.nn.functional as F
|
6 |
-
from torchlibrosa.stft import STFT, ISTFT, magphase
|
7 |
-
from models.base import Base, init_layer, init_bn, act
|
8 |
-
|
9 |
-
|
10 |
-
class FiLM(nn.Module):
|
11 |
-
def __init__(self, film_meta, condition_size):
|
12 |
-
super(FiLM, self).__init__()
|
13 |
-
|
14 |
-
self.condition_size = condition_size
|
15 |
-
|
16 |
-
self.modules, _ = self.create_film_modules(
|
17 |
-
film_meta=film_meta,
|
18 |
-
ancestor_names=[],
|
19 |
-
)
|
20 |
-
|
21 |
-
def create_film_modules(self, film_meta, ancestor_names):
|
22 |
-
|
23 |
-
modules = {}
|
24 |
-
|
25 |
-
# Pre-order traversal of modules
|
26 |
-
for module_name, value in film_meta.items():
|
27 |
-
|
28 |
-
if isinstance(value, int):
|
29 |
-
|
30 |
-
ancestor_names.append(module_name)
|
31 |
-
unique_module_name = '->'.join(ancestor_names)
|
32 |
-
|
33 |
-
modules[module_name] = self.add_film_layer_to_module(
|
34 |
-
num_features=value,
|
35 |
-
unique_module_name=unique_module_name,
|
36 |
-
)
|
37 |
-
|
38 |
-
elif isinstance(value, dict):
|
39 |
-
|
40 |
-
ancestor_names.append(module_name)
|
41 |
-
|
42 |
-
modules[module_name], _ = self.create_film_modules(
|
43 |
-
film_meta=value,
|
44 |
-
ancestor_names=ancestor_names,
|
45 |
-
)
|
46 |
-
|
47 |
-
ancestor_names.pop()
|
48 |
-
|
49 |
-
return modules, ancestor_names
|
50 |
-
|
51 |
-
def add_film_layer_to_module(self, num_features, unique_module_name):
|
52 |
-
|
53 |
-
layer = nn.Linear(self.condition_size, num_features)
|
54 |
-
init_layer(layer)
|
55 |
-
self.add_module(name=unique_module_name, module=layer)
|
56 |
-
|
57 |
-
return layer
|
58 |
-
|
59 |
-
def forward(self, conditions):
|
60 |
-
|
61 |
-
film_dict = self.calculate_film_data(
|
62 |
-
conditions=conditions,
|
63 |
-
modules=self.modules,
|
64 |
-
)
|
65 |
-
|
66 |
-
return film_dict
|
67 |
-
|
68 |
-
def calculate_film_data(self, conditions, modules):
|
69 |
-
|
70 |
-
film_data = {}
|
71 |
-
|
72 |
-
# Pre-order traversal of modules
|
73 |
-
for module_name, module in modules.items():
|
74 |
-
|
75 |
-
if isinstance(module, nn.Module):
|
76 |
-
film_data[module_name] = module(conditions)[:, :, None, None]
|
77 |
-
|
78 |
-
elif isinstance(module, dict):
|
79 |
-
film_data[module_name] = self.calculate_film_data(conditions, module)
|
80 |
-
|
81 |
-
return film_data
|
82 |
-
|
83 |
-
|
84 |
-
class ConvBlockRes(nn.Module):
|
85 |
-
def __init__(
|
86 |
-
self,
|
87 |
-
in_channels: int,
|
88 |
-
out_channels: int,
|
89 |
-
kernel_size: Tuple,
|
90 |
-
momentum: float,
|
91 |
-
has_film,
|
92 |
-
):
|
93 |
-
r"""Residual block."""
|
94 |
-
super(ConvBlockRes, self).__init__()
|
95 |
-
|
96 |
-
padding = [kernel_size[0] // 2, kernel_size[1] // 2]
|
97 |
-
|
98 |
-
self.bn1 = nn.BatchNorm2d(in_channels, momentum=momentum)
|
99 |
-
self.bn2 = nn.BatchNorm2d(out_channels, momentum=momentum)
|
100 |
-
|
101 |
-
self.conv1 = nn.Conv2d(
|
102 |
-
in_channels=in_channels,
|
103 |
-
out_channels=out_channels,
|
104 |
-
kernel_size=kernel_size,
|
105 |
-
stride=(1, 1),
|
106 |
-
dilation=(1, 1),
|
107 |
-
padding=padding,
|
108 |
-
bias=False,
|
109 |
-
)
|
110 |
-
|
111 |
-
self.conv2 = nn.Conv2d(
|
112 |
-
in_channels=out_channels,
|
113 |
-
out_channels=out_channels,
|
114 |
-
kernel_size=kernel_size,
|
115 |
-
stride=(1, 1),
|
116 |
-
dilation=(1, 1),
|
117 |
-
padding=padding,
|
118 |
-
bias=False,
|
119 |
-
)
|
120 |
-
|
121 |
-
if in_channels != out_channels:
|
122 |
-
self.shortcut = nn.Conv2d(
|
123 |
-
in_channels=in_channels,
|
124 |
-
out_channels=out_channels,
|
125 |
-
kernel_size=(1, 1),
|
126 |
-
stride=(1, 1),
|
127 |
-
padding=(0, 0),
|
128 |
-
)
|
129 |
-
self.is_shortcut = True
|
130 |
-
else:
|
131 |
-
self.is_shortcut = False
|
132 |
-
|
133 |
-
self.has_film = has_film
|
134 |
-
|
135 |
-
self.init_weights()
|
136 |
-
|
137 |
-
def init_weights(self) -> NoReturn:
|
138 |
-
r"""Initialize weights."""
|
139 |
-
init_bn(self.bn1)
|
140 |
-
init_bn(self.bn2)
|
141 |
-
init_layer(self.conv1)
|
142 |
-
init_layer(self.conv2)
|
143 |
-
|
144 |
-
if self.is_shortcut:
|
145 |
-
init_layer(self.shortcut)
|
146 |
-
|
147 |
-
def forward(self, input_tensor: torch.Tensor, film_dict: Dict) -> torch.Tensor:
|
148 |
-
r"""Forward data into the module.
|
149 |
-
|
150 |
-
Args:
|
151 |
-
input_tensor: (batch_size, input_feature_maps, time_steps, freq_bins)
|
152 |
-
|
153 |
-
Returns:
|
154 |
-
output_tensor: (batch_size, output_feature_maps, time_steps, freq_bins)
|
155 |
-
"""
|
156 |
-
b1 = film_dict['beta1']
|
157 |
-
b2 = film_dict['beta2']
|
158 |
-
|
159 |
-
x = self.conv1(F.leaky_relu_(self.bn1(input_tensor) + b1, negative_slope=0.01))
|
160 |
-
x = self.conv2(F.leaky_relu_(self.bn2(x) + b2, negative_slope=0.01))
|
161 |
-
|
162 |
-
if self.is_shortcut:
|
163 |
-
return self.shortcut(input_tensor) + x
|
164 |
-
else:
|
165 |
-
return input_tensor + x
|
166 |
-
|
167 |
-
|
168 |
-
class EncoderBlockRes1B(nn.Module):
|
169 |
-
def __init__(
|
170 |
-
self,
|
171 |
-
in_channels: int,
|
172 |
-
out_channels: int,
|
173 |
-
kernel_size: Tuple,
|
174 |
-
downsample: Tuple,
|
175 |
-
momentum: float,
|
176 |
-
has_film,
|
177 |
-
):
|
178 |
-
r"""Encoder block, contains 8 convolutional layers."""
|
179 |
-
super(EncoderBlockRes1B, self).__init__()
|
180 |
-
|
181 |
-
self.conv_block1 = ConvBlockRes(
|
182 |
-
in_channels, out_channels, kernel_size, momentum, has_film,
|
183 |
-
)
|
184 |
-
self.downsample = downsample
|
185 |
-
|
186 |
-
def forward(self, input_tensor: torch.Tensor, film_dict: Dict) -> torch.Tensor:
|
187 |
-
r"""Forward data into the module.
|
188 |
-
|
189 |
-
Args:
|
190 |
-
input_tensor: (batch_size, input_feature_maps, time_steps, freq_bins)
|
191 |
-
|
192 |
-
Returns:
|
193 |
-
encoder_pool: (batch_size, output_feature_maps, downsampled_time_steps, downsampled_freq_bins)
|
194 |
-
encoder: (batch_size, output_feature_maps, time_steps, freq_bins)
|
195 |
-
"""
|
196 |
-
encoder = self.conv_block1(input_tensor, film_dict['conv_block1'])
|
197 |
-
encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
|
198 |
-
return encoder_pool, encoder
|
199 |
-
|
200 |
-
|
201 |
-
class DecoderBlockRes1B(nn.Module):
|
202 |
-
def __init__(
|
203 |
-
self,
|
204 |
-
in_channels: int,
|
205 |
-
out_channels: int,
|
206 |
-
kernel_size: Tuple,
|
207 |
-
upsample: Tuple,
|
208 |
-
momentum: float,
|
209 |
-
has_film,
|
210 |
-
):
|
211 |
-
r"""Decoder block, contains 1 transposed convolutional and 8 convolutional layers."""
|
212 |
-
super(DecoderBlockRes1B, self).__init__()
|
213 |
-
self.kernel_size = kernel_size
|
214 |
-
self.stride = upsample
|
215 |
-
|
216 |
-
self.conv1 = torch.nn.ConvTranspose2d(
|
217 |
-
in_channels=in_channels,
|
218 |
-
out_channels=out_channels,
|
219 |
-
kernel_size=self.stride,
|
220 |
-
stride=self.stride,
|
221 |
-
padding=(0, 0),
|
222 |
-
bias=False,
|
223 |
-
dilation=(1, 1),
|
224 |
-
)
|
225 |
-
|
226 |
-
self.bn1 = nn.BatchNorm2d(in_channels, momentum=momentum)
|
227 |
-
self.conv_block2 = ConvBlockRes(
|
228 |
-
out_channels * 2, out_channels, kernel_size, momentum, has_film,
|
229 |
-
)
|
230 |
-
self.bn2 = nn.BatchNorm2d(in_channels, momentum=momentum)
|
231 |
-
self.has_film = has_film
|
232 |
-
|
233 |
-
self.init_weights()
|
234 |
-
|
235 |
-
def init_weights(self):
|
236 |
-
r"""Initialize weights."""
|
237 |
-
init_bn(self.bn1)
|
238 |
-
init_layer(self.conv1)
|
239 |
-
|
240 |
-
def forward(
|
241 |
-
self, input_tensor: torch.Tensor, concat_tensor: torch.Tensor, film_dict: Dict,
|
242 |
-
) -> torch.Tensor:
|
243 |
-
r"""Forward data into the module.
|
244 |
-
|
245 |
-
Args:
|
246 |
-
input_tensor: (batch_size, input_feature_maps, downsampled_time_steps, downsampled_freq_bins)
|
247 |
-
concat_tensor: (batch_size, input_feature_maps, time_steps, freq_bins)
|
248 |
-
|
249 |
-
Returns:
|
250 |
-
output_tensor: (batch_size, output_feature_maps, time_steps, freq_bins)
|
251 |
-
"""
|
252 |
-
# b1 = film_dict['beta1']
|
253 |
-
|
254 |
-
b1 = film_dict['beta1']
|
255 |
-
x = self.conv1(F.leaky_relu_(self.bn1(input_tensor) + b1))
|
256 |
-
# (batch_size, input_feature_maps, time_steps, freq_bins)
|
257 |
-
|
258 |
-
x = torch.cat((x, concat_tensor), dim=1)
|
259 |
-
# (batch_size, input_feature_maps * 2, time_steps, freq_bins)
|
260 |
-
|
261 |
-
x = self.conv_block2(x, film_dict['conv_block2'])
|
262 |
-
# output_tensor: (batch_size, output_feature_maps, time_steps, freq_bins)
|
263 |
-
|
264 |
-
return x
|
265 |
-
|
266 |
-
|
267 |
-
class ResUNet30_Base(nn.Module, Base):
|
268 |
-
def __init__(self, input_channels, output_channels):
|
269 |
-
super(ResUNet30_Base, self).__init__()
|
270 |
-
|
271 |
-
window_size = 2048
|
272 |
-
hop_size = 320
|
273 |
-
center = True
|
274 |
-
pad_mode = "reflect"
|
275 |
-
window = "hann"
|
276 |
-
momentum = 0.01
|
277 |
-
|
278 |
-
self.output_channels = output_channels
|
279 |
-
self.target_sources_num = 1
|
280 |
-
self.K = 3
|
281 |
-
|
282 |
-
self.time_downsample_ratio = 2 ** 5 # This number equals 2^{#encoder_blcoks}
|
283 |
-
|
284 |
-
self.stft = STFT(
|
285 |
-
n_fft=window_size,
|
286 |
-
hop_length=hop_size,
|
287 |
-
win_length=window_size,
|
288 |
-
window=window,
|
289 |
-
center=center,
|
290 |
-
pad_mode=pad_mode,
|
291 |
-
freeze_parameters=True,
|
292 |
-
)
|
293 |
-
|
294 |
-
self.istft = ISTFT(
|
295 |
-
n_fft=window_size,
|
296 |
-
hop_length=hop_size,
|
297 |
-
win_length=window_size,
|
298 |
-
window=window,
|
299 |
-
center=center,
|
300 |
-
pad_mode=pad_mode,
|
301 |
-
freeze_parameters=True,
|
302 |
-
)
|
303 |
-
|
304 |
-
self.bn0 = nn.BatchNorm2d(window_size // 2 + 1, momentum=momentum)
|
305 |
-
|
306 |
-
self.pre_conv = nn.Conv2d(
|
307 |
-
in_channels=input_channels,
|
308 |
-
out_channels=32,
|
309 |
-
kernel_size=(1, 1),
|
310 |
-
stride=(1, 1),
|
311 |
-
padding=(0, 0),
|
312 |
-
bias=True,
|
313 |
-
)
|
314 |
-
|
315 |
-
self.encoder_block1 = EncoderBlockRes1B(
|
316 |
-
in_channels=32,
|
317 |
-
out_channels=32,
|
318 |
-
kernel_size=(3, 3),
|
319 |
-
downsample=(2, 2),
|
320 |
-
momentum=momentum,
|
321 |
-
has_film=True,
|
322 |
-
)
|
323 |
-
self.encoder_block2 = EncoderBlockRes1B(
|
324 |
-
in_channels=32,
|
325 |
-
out_channels=64,
|
326 |
-
kernel_size=(3, 3),
|
327 |
-
downsample=(2, 2),
|
328 |
-
momentum=momentum,
|
329 |
-
has_film=True,
|
330 |
-
)
|
331 |
-
self.encoder_block3 = EncoderBlockRes1B(
|
332 |
-
in_channels=64,
|
333 |
-
out_channels=128,
|
334 |
-
kernel_size=(3, 3),
|
335 |
-
downsample=(2, 2),
|
336 |
-
momentum=momentum,
|
337 |
-
has_film=True,
|
338 |
-
)
|
339 |
-
self.encoder_block4 = EncoderBlockRes1B(
|
340 |
-
in_channels=128,
|
341 |
-
out_channels=256,
|
342 |
-
kernel_size=(3, 3),
|
343 |
-
downsample=(2, 2),
|
344 |
-
momentum=momentum,
|
345 |
-
has_film=True,
|
346 |
-
)
|
347 |
-
self.encoder_block5 = EncoderBlockRes1B(
|
348 |
-
in_channels=256,
|
349 |
-
out_channels=384,
|
350 |
-
kernel_size=(3, 3),
|
351 |
-
downsample=(2, 2),
|
352 |
-
momentum=momentum,
|
353 |
-
has_film=True,
|
354 |
-
)
|
355 |
-
self.encoder_block6 = EncoderBlockRes1B(
|
356 |
-
in_channels=384,
|
357 |
-
out_channels=384,
|
358 |
-
kernel_size=(3, 3),
|
359 |
-
downsample=(1, 2),
|
360 |
-
momentum=momentum,
|
361 |
-
has_film=True,
|
362 |
-
)
|
363 |
-
self.conv_block7a = EncoderBlockRes1B(
|
364 |
-
in_channels=384,
|
365 |
-
out_channels=384,
|
366 |
-
kernel_size=(3, 3),
|
367 |
-
downsample=(1, 1),
|
368 |
-
momentum=momentum,
|
369 |
-
has_film=True,
|
370 |
-
)
|
371 |
-
self.decoder_block1 = DecoderBlockRes1B(
|
372 |
-
in_channels=384,
|
373 |
-
out_channels=384,
|
374 |
-
kernel_size=(3, 3),
|
375 |
-
upsample=(1, 2),
|
376 |
-
momentum=momentum,
|
377 |
-
has_film=True,
|
378 |
-
)
|
379 |
-
self.decoder_block2 = DecoderBlockRes1B(
|
380 |
-
in_channels=384,
|
381 |
-
out_channels=384,
|
382 |
-
kernel_size=(3, 3),
|
383 |
-
upsample=(2, 2),
|
384 |
-
momentum=momentum,
|
385 |
-
has_film=True,
|
386 |
-
)
|
387 |
-
self.decoder_block3 = DecoderBlockRes1B(
|
388 |
-
in_channels=384,
|
389 |
-
out_channels=256,
|
390 |
-
kernel_size=(3, 3),
|
391 |
-
upsample=(2, 2),
|
392 |
-
momentum=momentum,
|
393 |
-
has_film=True,
|
394 |
-
)
|
395 |
-
self.decoder_block4 = DecoderBlockRes1B(
|
396 |
-
in_channels=256,
|
397 |
-
out_channels=128,
|
398 |
-
kernel_size=(3, 3),
|
399 |
-
upsample=(2, 2),
|
400 |
-
momentum=momentum,
|
401 |
-
has_film=True,
|
402 |
-
)
|
403 |
-
self.decoder_block5 = DecoderBlockRes1B(
|
404 |
-
in_channels=128,
|
405 |
-
out_channels=64,
|
406 |
-
kernel_size=(3, 3),
|
407 |
-
upsample=(2, 2),
|
408 |
-
momentum=momentum,
|
409 |
-
has_film=True,
|
410 |
-
)
|
411 |
-
self.decoder_block6 = DecoderBlockRes1B(
|
412 |
-
in_channels=64,
|
413 |
-
out_channels=32,
|
414 |
-
kernel_size=(3, 3),
|
415 |
-
upsample=(2, 2),
|
416 |
-
momentum=momentum,
|
417 |
-
has_film=True,
|
418 |
-
)
|
419 |
-
|
420 |
-
self.after_conv = nn.Conv2d(
|
421 |
-
in_channels=32,
|
422 |
-
out_channels=output_channels * self.K,
|
423 |
-
kernel_size=(1, 1),
|
424 |
-
stride=(1, 1),
|
425 |
-
padding=(0, 0),
|
426 |
-
bias=True,
|
427 |
-
)
|
428 |
-
|
429 |
-
self.init_weights()
|
430 |
-
|
431 |
-
def init_weights(self):
|
432 |
-
init_bn(self.bn0)
|
433 |
-
init_layer(self.pre_conv)
|
434 |
-
init_layer(self.after_conv)
|
435 |
-
|
436 |
-
def feature_maps_to_wav(
|
437 |
-
self,
|
438 |
-
input_tensor: torch.Tensor,
|
439 |
-
sp: torch.Tensor,
|
440 |
-
sin_in: torch.Tensor,
|
441 |
-
cos_in: torch.Tensor,
|
442 |
-
audio_length: int,
|
443 |
-
) -> torch.Tensor:
|
444 |
-
r"""Convert feature maps to waveform.
|
445 |
-
|
446 |
-
Args:
|
447 |
-
input_tensor: (batch_size, target_sources_num * output_channels * self.K, time_steps, freq_bins)
|
448 |
-
sp: (batch_size, input_channels, time_steps, freq_bins)
|
449 |
-
sin_in: (batch_size, input_channels, time_steps, freq_bins)
|
450 |
-
cos_in: (batch_size, input_channels, time_steps, freq_bins)
|
451 |
-
|
452 |
-
(There is input_channels == output_channels for the source separation task.)
|
453 |
-
|
454 |
-
Outputs:
|
455 |
-
waveform: (batch_size, target_sources_num * output_channels, segment_samples)
|
456 |
-
"""
|
457 |
-
batch_size, _, time_steps, freq_bins = input_tensor.shape
|
458 |
-
|
459 |
-
x = input_tensor.reshape(
|
460 |
-
batch_size,
|
461 |
-
self.target_sources_num,
|
462 |
-
self.output_channels,
|
463 |
-
self.K,
|
464 |
-
time_steps,
|
465 |
-
freq_bins,
|
466 |
-
)
|
467 |
-
# x: (batch_size, target_sources_num, output_channels, self.K, time_steps, freq_bins)
|
468 |
-
|
469 |
-
mask_mag = torch.sigmoid(x[:, :, :, 0, :, :])
|
470 |
-
_mask_real = torch.tanh(x[:, :, :, 1, :, :])
|
471 |
-
_mask_imag = torch.tanh(x[:, :, :, 2, :, :])
|
472 |
-
# linear_mag = torch.tanh(x[:, :, :, 3, :, :])
|
473 |
-
_, mask_cos, mask_sin = magphase(_mask_real, _mask_imag)
|
474 |
-
# mask_cos, mask_sin: (batch_size, target_sources_num, output_channels, time_steps, freq_bins)
|
475 |
-
|
476 |
-
# Y = |Y|cos∠Y + j|Y|sin∠Y
|
477 |
-
# = |Y|cos(∠X + ∠M) + j|Y|sin(∠X + ∠M)
|
478 |
-
# = |Y|(cos∠X cos∠M - sin∠X sin∠M) + j|Y|(sin∠X cos∠M + cos∠X sin∠M)
|
479 |
-
out_cos = (
|
480 |
-
cos_in[:, None, :, :, :] * mask_cos - sin_in[:, None, :, :, :] * mask_sin
|
481 |
-
)
|
482 |
-
out_sin = (
|
483 |
-
sin_in[:, None, :, :, :] * mask_cos + cos_in[:, None, :, :, :] * mask_sin
|
484 |
-
)
|
485 |
-
# out_cos: (batch_size, target_sources_num, output_channels, time_steps, freq_bins)
|
486 |
-
# out_sin: (batch_size, target_sources_num, output_channels, time_steps, freq_bins)
|
487 |
-
|
488 |
-
# Calculate |Y|.
|
489 |
-
out_mag = F.relu_(sp[:, None, :, :, :] * mask_mag)
|
490 |
-
# out_mag = F.relu_(sp[:, None, :, :, :] * mask_mag + linear_mag)
|
491 |
-
# out_mag: (batch_size, target_sources_num, output_channels, time_steps, freq_bins)
|
492 |
-
|
493 |
-
# Calculate Y_{real} and Y_{imag} for ISTFT.
|
494 |
-
out_real = out_mag * out_cos
|
495 |
-
out_imag = out_mag * out_sin
|
496 |
-
# out_real, out_imag: (batch_size, target_sources_num, output_channels, time_steps, freq_bins)
|
497 |
-
|
498 |
-
# Reformat shape to (N, 1, time_steps, freq_bins) for ISTFT where
|
499 |
-
# N = batch_size * target_sources_num * output_channels
|
500 |
-
shape = (
|
501 |
-
batch_size * self.target_sources_num * self.output_channels,
|
502 |
-
1,
|
503 |
-
time_steps,
|
504 |
-
freq_bins,
|
505 |
-
)
|
506 |
-
out_real = out_real.reshape(shape)
|
507 |
-
out_imag = out_imag.reshape(shape)
|
508 |
-
|
509 |
-
# ISTFT.
|
510 |
-
x = self.istft(out_real, out_imag, audio_length)
|
511 |
-
# (batch_size * target_sources_num * output_channels, segments_num)
|
512 |
-
|
513 |
-
# Reshape.
|
514 |
-
waveform = x.reshape(
|
515 |
-
batch_size, self.target_sources_num * self.output_channels, audio_length
|
516 |
-
)
|
517 |
-
# (batch_size, target_sources_num * output_channels, segments_num)
|
518 |
-
|
519 |
-
return waveform

    def forward(self, mixtures, film_dict):
        """
        Args:
            input: (batch_size, segment_samples, channels_num)

        Outputs:
            output_dict: {
                'wav': (batch_size, segment_samples, channels_num),
                'sp': (batch_size, channels_num, time_steps, freq_bins)}
        """

        mag, cos_in, sin_in = self.wav_to_spectrogram_phase(mixtures)
        x = mag

        # Batch normalization
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        """(batch_size, channels, time_steps, freq_bins)"""

        # Pad the spectrogram so it is evenly divisible by the downsample ratio.
        origin_len = x.shape[2]
        pad_len = (
            int(np.ceil(x.shape[2] / self.time_downsample_ratio)) * self.time_downsample_ratio
            - origin_len
        )
        x = F.pad(x, pad=(0, 0, 0, pad_len))
        """(batch_size, channels, padded_time_steps, freq_bins)"""

        # Make the frequency bins evenly divisible by 2, e.g., 513 -> 512.
        x = x[..., 0 : x.shape[-1] - 1]  # (bs, channels, T, F)

        # UNet
        x = self.pre_conv(x)
        x1_pool, x1 = self.encoder_block1(x, film_dict['encoder_block1'])  # x1_pool: (bs, 32, T / 2, F / 2)
        x2_pool, x2 = self.encoder_block2(x1_pool, film_dict['encoder_block2'])  # x2_pool: (bs, 64, T / 4, F / 4)
        x3_pool, x3 = self.encoder_block3(x2_pool, film_dict['encoder_block3'])  # x3_pool: (bs, 128, T / 8, F / 8)
        x4_pool, x4 = self.encoder_block4(x3_pool, film_dict['encoder_block4'])  # x4_pool: (bs, 256, T / 16, F / 16)
        x5_pool, x5 = self.encoder_block5(x4_pool, film_dict['encoder_block5'])  # x5_pool: (bs, 384, T / 32, F / 32)
        x6_pool, x6 = self.encoder_block6(x5_pool, film_dict['encoder_block6'])  # x6_pool: (bs, 384, T / 32, F / 64)
        x_center, _ = self.conv_block7a(x6_pool, film_dict['conv_block7a'])  # (bs, 384, T / 32, F / 64)
        x7 = self.decoder_block1(x_center, x6, film_dict['decoder_block1'])  # (bs, 384, T / 32, F / 32)
        x8 = self.decoder_block2(x7, x5, film_dict['decoder_block2'])  # (bs, 384, T / 16, F / 16)
        x9 = self.decoder_block3(x8, x4, film_dict['decoder_block3'])  # (bs, 256, T / 8, F / 8)
        x10 = self.decoder_block4(x9, x3, film_dict['decoder_block4'])  # (bs, 128, T / 4, F / 4)
        x11 = self.decoder_block5(x10, x2, film_dict['decoder_block5'])  # (bs, 64, T / 2, F / 2)
        x12 = self.decoder_block6(x11, x1, film_dict['decoder_block6'])  # (bs, 32, T, F)

        x = self.after_conv(x12)

        # Recover shape
        x = F.pad(x, pad=(0, 1))
        x = x[:, :, 0:origin_len, :]

        audio_length = mixtures.shape[2]

        # Recover each subband spectrogram to a subband waveform, then synthesize
        # the subband waveforms into a waveform.
        separated_audio = self.feature_maps_to_wav(
            input_tensor=x,
            # input_tensor: (batch_size, target_sources_num * output_channels * self.K, T, F')
            sp=mag,
            # sp: (batch_size, input_channels, T, F')
            sin_in=sin_in,
            # sin_in: (batch_size, input_channels, T, F')
            cos_in=cos_in,
            # cos_in: (batch_size, input_channels, T, F')
            audio_length=audio_length,
        )
        # (batch_size, target_sources_num * output_channels, subbands_num, segment_samples)

        output_dict = {'waveform': separated_audio}

        return output_dict


def get_film_meta(module):

    film_meta = {}

    if hasattr(module, 'has_film'):
        if module.has_film:
            film_meta['beta1'] = module.bn1.num_features
            film_meta['beta2'] = module.bn2.num_features
        else:
            film_meta['beta1'] = 0
            film_meta['beta2'] = 0

    for child_name, child_module in module.named_children():

        child_meta = get_film_meta(child_module)

        if len(child_meta) > 0:
            film_meta[child_name] = child_meta

    return film_meta


class ResUNet30(nn.Module):
    def __init__(self, input_channels, output_channels, condition_size):
        super(ResUNet30, self).__init__()

        self.base = ResUNet30_Base(
            input_channels=input_channels,
            output_channels=output_channels,
        )

        self.film_meta = get_film_meta(
            module=self.base,
        )

        self.film = FiLM(
            film_meta=self.film_meta,
            condition_size=condition_size
        )

    def forward(self, input_dict):
        mixtures = input_dict['mixture']
        conditions = input_dict['condition']

        film_dict = self.film(
            conditions=conditions,
        )

        output_dict = self.base(
            mixtures=mixtures,
            film_dict=film_dict,
        )

        return output_dict

    @torch.no_grad()
    def chunk_inference(self, input_dict):
        chunk_config = {
            'NL': 1.0,
            'NC': 3.0,
            'NR': 1.0,
            'RATE': self.sampling_rate
        }

        mixtures = input_dict['mixture']
        conditions = input_dict['condition']

        film_dict = self.film(
            conditions=conditions,
        )

        NL = int(chunk_config['NL'] * chunk_config['RATE'])
        NC = int(chunk_config['NC'] * chunk_config['RATE'])
        NR = int(chunk_config['NR'] * chunk_config['RATE'])

        L = mixtures.shape[2]

        out_np = np.zeros([1, L])

        WINDOW = NL + NC + NR
        current_idx = 0

        while current_idx + WINDOW < L:
            chunk_in = mixtures[:, :, current_idx:current_idx + WINDOW]

            chunk_out = self.base(
                mixtures=chunk_in,
                film_dict=film_dict,
            )['waveform']

            chunk_out_np = chunk_out.squeeze(0).cpu().data.numpy()

            if current_idx == 0:
                out_np[:, current_idx:current_idx + WINDOW - NR] = \
                    chunk_out_np[:, :-NR] if NR != 0 else chunk_out_np
            else:
                out_np[:, current_idx + NL:current_idx + WINDOW - NR] = \
                    chunk_out_np[:, NL:-NR] if NR != 0 else chunk_out_np[:, NL:]

            current_idx += NC

        if current_idx < L:
            chunk_in = mixtures[:, :, current_idx:current_idx + WINDOW]
            chunk_out = self.base(
                mixtures=chunk_in,
                film_dict=film_dict,
            )['waveform']

            chunk_out_np = chunk_out.squeeze(0).cpu().data.numpy()

            seg_len = chunk_out_np.shape[1]
            out_np[:, current_idx + NL:current_idx + seg_len] = \
                chunk_out_np[:, NL:]

        return out_np
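
Not part of the deleted file: the chunked-inference loop above keeps only the NC-sample center of each window, discarding NL left-context and NR right-context samples (except at the signal edges). A toy coverage check with illustrative sizes, showing that every output sample is written exactly once:

    import numpy as np

    NL, NC, NR, L = 2, 5, 2, 23
    WINDOW = NL + NC + NR
    covered = np.zeros(L, dtype=int)

    current_idx = 0
    while current_idx + WINDOW < L:
        if current_idx == 0:
            covered[current_idx:current_idx + WINDOW - NR] += 1  # keep left edge
        else:
            covered[current_idx + NL:current_idx + WINDOW - NR] += 1  # center only
        current_idx += NC
    if current_idx < L:
        seg_len = min(WINDOW, L - current_idx)  # last chunk may be truncated
        covered[current_idx + NL:current_idx + seg_len] += 1  # keep right edge

    assert (covered == 1).all()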
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/config/test_yacs_config.py
DELETED
@@ -1,270 +0,0 @@
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.


import os
import tempfile
import unittest
import torch
from omegaconf import OmegaConf

from detectron2 import model_zoo
from detectron2.config import configurable, downgrade_config, get_cfg, upgrade_config
from detectron2.layers import ShapeSpec
from detectron2.modeling import build_model

_V0_CFG = """
MODEL:
  RPN_HEAD:
    NAME: "TEST"
VERSION: 0
"""

_V1_CFG = """
MODEL:
  WEIGHT: "/path/to/weight"
"""


class TestConfigVersioning(unittest.TestCase):
    def test_upgrade_downgrade_consistency(self):
        cfg = get_cfg()
        # check that custom is preserved
        cfg.USER_CUSTOM = 1

        down = downgrade_config(cfg, to_version=0)
        up = upgrade_config(down)
        self.assertTrue(up == cfg)

    def _merge_cfg_str(self, cfg, merge_str):
        f = tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False)
        try:
            f.write(merge_str)
            f.close()
            cfg.merge_from_file(f.name)
        finally:
            os.remove(f.name)
        return cfg

    def test_auto_upgrade(self):
        cfg = get_cfg()
        latest_ver = cfg.VERSION
        cfg.USER_CUSTOM = 1

        self._merge_cfg_str(cfg, _V0_CFG)

        self.assertEqual(cfg.MODEL.RPN.HEAD_NAME, "TEST")
        self.assertEqual(cfg.VERSION, latest_ver)

    def test_guess_v1(self):
        cfg = get_cfg()
        latest_ver = cfg.VERSION
        self._merge_cfg_str(cfg, _V1_CFG)
        self.assertEqual(cfg.VERSION, latest_ver)


class _TestClassA(torch.nn.Module):
    @configurable
    def __init__(self, arg1, arg2, arg3=3):
        super().__init__()
        self.arg1 = arg1
        self.arg2 = arg2
        self.arg3 = arg3
        assert arg1 == 1
        assert arg2 == 2
        assert arg3 == 3

    @classmethod
    def from_config(cls, cfg):
        args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
        return args


class _TestClassB(_TestClassA):
    @configurable
    def __init__(self, input_shape, arg1, arg2, arg3=3):
        """
        Doc of _TestClassB
        """
        assert input_shape == "shape"
        super().__init__(arg1, arg2, arg3)

    @classmethod
    def from_config(cls, cfg, input_shape):  # test extra positional arg in from_config
        args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
        args["input_shape"] = input_shape
        return args


class _LegacySubClass(_TestClassB):
    # an old subclass written in cfg style
    def __init__(self, cfg, input_shape, arg4=4):
        super().__init__(cfg, input_shape)
        assert self.arg1 == 1
        assert self.arg2 == 2
        assert self.arg3 == 3


class _NewSubClassNewInit(_TestClassB):
    # test new subclass with a new __init__
    @configurable
    def __init__(self, input_shape, arg4=4, **kwargs):
        super().__init__(input_shape, **kwargs)
        assert self.arg1 == 1
        assert self.arg2 == 2
        assert self.arg3 == 3


class _LegacySubClassNotCfg(_TestClassB):
    # an old subclass written in cfg style, but argument is not called "cfg"
    def __init__(self, config, input_shape):
        super().__init__(config, input_shape)
        assert self.arg1 == 1
        assert self.arg2 == 2
        assert self.arg3 == 3


class _TestClassC(_TestClassB):
    @classmethod
    def from_config(cls, cfg, input_shape, **kwargs):  # test extra kwarg overwrite
        args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
        args["input_shape"] = input_shape
        args.update(kwargs)
        return args


class _TestClassD(_TestClassA):
    @configurable
    def __init__(self, input_shape: ShapeSpec, arg1: int, arg2, arg3=3):
        assert input_shape == "shape"
        super().__init__(arg1, arg2, arg3)

    # _TestClassA.from_config does not have input_shape args.
    # Test whether input_shape will be forwarded to __init__


@configurable(from_config=lambda cfg, arg2: {"arg1": cfg.ARG1, "arg2": arg2, "arg3": cfg.ARG3})
def _test_func(arg1, arg2=2, arg3=3, arg4=4):
    return arg1, arg2, arg3, arg4


class TestConfigurable(unittest.TestCase):
    def testInitWithArgs(self):
        _ = _TestClassA(arg1=1, arg2=2, arg3=3)
        _ = _TestClassB("shape", arg1=1, arg2=2)
        _ = _TestClassC("shape", arg1=1, arg2=2)
        _ = _TestClassD("shape", arg1=1, arg2=2, arg3=3)

    def testPatchedAttr(self):
        self.assertTrue("Doc" in _TestClassB.__init__.__doc__)
        self.assertEqual(_TestClassD.__init__.__annotations__["arg1"], int)

    def testInitWithCfg(self):
        cfg = get_cfg()
        cfg.ARG1 = 1
        cfg.ARG2 = 2
        cfg.ARG3 = 3
        _ = _TestClassA(cfg)
        _ = _TestClassB(cfg, input_shape="shape")
        _ = _TestClassC(cfg, input_shape="shape")
        _ = _TestClassD(cfg, input_shape="shape")
        _ = _LegacySubClass(cfg, input_shape="shape")
        _ = _NewSubClassNewInit(cfg, input_shape="shape")
        _ = _LegacySubClassNotCfg(cfg, input_shape="shape")
        with self.assertRaises(TypeError):
            # disallow forwarding positional args to __init__ since it's prone to errors
            _ = _TestClassD(cfg, "shape")

        # call with kwargs instead
        _ = _TestClassA(cfg=cfg)
        _ = _TestClassB(cfg=cfg, input_shape="shape")
        _ = _TestClassC(cfg=cfg, input_shape="shape")
        _ = _TestClassD(cfg=cfg, input_shape="shape")
        _ = _LegacySubClass(cfg=cfg, input_shape="shape")
        _ = _NewSubClassNewInit(cfg=cfg, input_shape="shape")
        _ = _LegacySubClassNotCfg(config=cfg, input_shape="shape")

    def testInitWithCfgOverwrite(self):
        cfg = get_cfg()
        cfg.ARG1 = 1
        cfg.ARG2 = 999  # wrong config
        with self.assertRaises(AssertionError):
            _ = _TestClassA(cfg, arg3=3)

        # overwrite arg2 with correct config later:
        _ = _TestClassA(cfg, arg2=2, arg3=3)
        _ = _TestClassB(cfg, input_shape="shape", arg2=2, arg3=3)
        _ = _TestClassC(cfg, input_shape="shape", arg2=2, arg3=3)
        _ = _TestClassD(cfg, input_shape="shape", arg2=2, arg3=3)

        # call with kwargs cfg=cfg instead
        _ = _TestClassA(cfg=cfg, arg2=2, arg3=3)
        _ = _TestClassB(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
        _ = _TestClassC(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
        _ = _TestClassD(cfg=cfg, input_shape="shape", arg2=2, arg3=3)

    def testInitWithCfgWrongArgs(self):
        cfg = get_cfg()
        cfg.ARG1 = 1
        cfg.ARG2 = 2
        with self.assertRaises(TypeError):
            _ = _TestClassB(cfg, "shape", not_exist=1)
        with self.assertRaises(TypeError):
            _ = _TestClassC(cfg, "shape", not_exist=1)
        with self.assertRaises(TypeError):
            _ = _TestClassD(cfg, "shape", not_exist=1)

    def testBadClass(self):
        class _BadClass1:
            @configurable
            def __init__(self, a=1, b=2):
                pass

        class _BadClass2:
            @configurable
            def __init__(self, a=1, b=2):
                pass

            def from_config(self, cfg):  # noqa
                pass

        class _BadClass3:
            @configurable
            def __init__(self, a=1, b=2):
                pass

            # bad name: must be cfg
            @classmethod
            def from_config(cls, config):  # noqa
                pass

        with self.assertRaises(AttributeError):
            _ = _BadClass1(a=1)

        with self.assertRaises(TypeError):
            _ = _BadClass2(a=1)

        with self.assertRaises(TypeError):
            _ = _BadClass3(get_cfg())

    def testFuncWithCfg(self):
        cfg = get_cfg()
        cfg.ARG1 = 10
        cfg.ARG3 = 30

        self.assertEqual(_test_func(1), (1, 2, 3, 4))
        with self.assertRaises(TypeError):
            _test_func(cfg)
        self.assertEqual(_test_func(cfg, arg2=2), (10, 2, 30, 4))
        self.assertEqual(_test_func(cfg, arg1=100, arg2=20), (100, 20, 30, 4))
        self.assertEqual(_test_func(cfg, arg1=100, arg2=20, arg4=40), (100, 20, 30, 40))

        self.assertTrue(callable(_test_func.from_config))

    def testOmegaConf(self):
        cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
        cfg = OmegaConf.create(cfg.dump())
        if not torch.cuda.is_available():
            cfg.MODEL.DEVICE = "cpu"
        # test that a model can be built with omegaconf config as well
        build_model(cfg)
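
Not part of the deleted file: a minimal sketch of the `@configurable` pattern these tests exercise, so the same class can be built either from a yacs config or from explicit arguments; the class name and config key below are illustrative.

    import torch
    from detectron2.config import configurable, get_cfg


    class _Head(torch.nn.Module):
        @configurable
        def __init__(self, num_classes):
            super().__init__()
            self.num_classes = num_classes

        @classmethod
        def from_config(cls, cfg):
            # maps a yacs config node to explicit constructor arguments
            return {"num_classes": cfg.NUM_CLASSES}


    cfg = get_cfg()
    cfg.NUM_CLASSES = 80
    h1 = _Head(cfg)             # config-style call, routed through from_config
    h2 = _Head(num_classes=80)  # plain explicit-argument call
    assert h1.num_classes == h2.num_classes == 80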
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/structures/test_rotated_boxes.py
DELETED
@@ -1,437 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import random
import unittest
import torch
from fvcore.common.benchmark import benchmark

from detectron2.layers.rotated_boxes import pairwise_iou_rotated
from detectron2.structures.boxes import Boxes
from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou
from detectron2.utils.testing import reload_script_model

logger = logging.getLogger(__name__)


class TestRotatedBoxesLayer(unittest.TestCase):
    def test_iou_0_dim_cpu(self):
        boxes1 = torch.rand(0, 5, dtype=torch.float32)
        boxes2 = torch.rand(10, 5, dtype=torch.float32)
        expected_ious = torch.zeros(0, 10, dtype=torch.float32)
        ious = pairwise_iou_rotated(boxes1, boxes2)
        self.assertTrue(torch.allclose(ious, expected_ious))

        boxes1 = torch.rand(10, 5, dtype=torch.float32)
        boxes2 = torch.rand(0, 5, dtype=torch.float32)
        expected_ious = torch.zeros(10, 0, dtype=torch.float32)
        ious = pairwise_iou_rotated(boxes1, boxes2)
        self.assertTrue(torch.allclose(ious, expected_ious))

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_iou_0_dim_cuda(self):
        boxes1 = torch.rand(0, 5, dtype=torch.float32)
        boxes2 = torch.rand(10, 5, dtype=torch.float32)
        expected_ious = torch.zeros(0, 10, dtype=torch.float32)
        ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
        self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))

        boxes1 = torch.rand(10, 5, dtype=torch.float32)
        boxes2 = torch.rand(0, 5, dtype=torch.float32)
        expected_ious = torch.zeros(10, 0, dtype=torch.float32)
        ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
        self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))

    def test_iou_half_overlap_cpu(self):
        boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32)
        boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32)
        expected_ious = torch.tensor([[0.5]], dtype=torch.float32)
        ious = pairwise_iou_rotated(boxes1, boxes2)
        self.assertTrue(torch.allclose(ious, expected_ious))

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_iou_half_overlap_cuda(self):
        boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32)
        boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32)
        expected_ious = torch.tensor([[0.5]], dtype=torch.float32)
        ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
        self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))

    def test_iou_precision(self):
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            boxes1 = torch.tensor([[565, 565, 10, 10.0, 0]], dtype=torch.float32, device=device)
            boxes2 = torch.tensor([[565, 565, 10, 8.3, 0]], dtype=torch.float32, device=device)
            iou = 8.3 / 10.0
            expected_ious = torch.tensor([[iou]], dtype=torch.float32)
            ious = pairwise_iou_rotated(boxes1, boxes2)
            self.assertTrue(torch.allclose(ious.cpu(), expected_ious))

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_iou_too_many_boxes_cuda(self):
        s1, s2 = 5, 1289035
        boxes1 = torch.zeros(s1, 5)
        boxes2 = torch.zeros(s2, 5)
        ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
        self.assertTupleEqual(tuple(ious_cuda.shape), (s1, s2))

    def test_iou_extreme(self):
        # Cause floating point issues in cuda kernels (#1266)
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device)
            boxes2 = torch.tensor(
                [
                    [
                        -1.117407639806935e17,
                        1.3858420478349148e18,
                        1000.0000610351562,
                        1000.0000610351562,
                        1612.0,
                    ]
                ],
                device=device,
            )
            ious = pairwise_iou_rotated(boxes1, boxes2)
            self.assertTrue(ious.min() >= 0, ious)

    def test_iou_issue_2154(self):
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            boxes1 = torch.tensor(
                [
                    [
                        296.6620178222656,
                        458.73883056640625,
                        23.515729904174805,
                        47.677001953125,
                        0.08795166015625,
                    ]
                ],
                device=device,
            )
            boxes2 = torch.tensor(
                [[296.66201, 458.73882000000003, 23.51573, 47.67702, 0.087951]],
                device=device,
            )
            ious = pairwise_iou_rotated(boxes1, boxes2)
            expected_ious = torch.tensor([[1.0]], dtype=torch.float32)
            self.assertTrue(torch.allclose(ious.cpu(), expected_ious))

    def test_iou_issue_2167(self):
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            boxes1 = torch.tensor(
                [
                    [
                        2563.74462890625000000000,
                        1436.79016113281250000000,
                        2174.70336914062500000000,
                        214.09500122070312500000,
                        115.11834716796875000000,
                    ]
                ],
                device=device,
            )
            boxes2 = torch.tensor(
                [
                    [
                        2563.74462890625000000000,
                        1436.79028320312500000000,
                        2174.70288085937500000000,
                        214.09495544433593750000,
                        115.11835479736328125000,
                    ]
                ],
                device=device,
            )
            ious = pairwise_iou_rotated(boxes1, boxes2)
            expected_ious = torch.tensor([[1.0]], dtype=torch.float32)
            self.assertTrue(torch.allclose(ious.cpu(), expected_ious))


class TestRotatedBoxesStructure(unittest.TestCase):
    def test_clip_area_0_degree(self):
        for _ in range(50):
            num_boxes = 100
            boxes_5d = torch.zeros(num_boxes, 5)
            boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
            boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
            boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
            boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
            # Convert from (x_ctr, y_ctr, w, h, 0) to (x1, y1, x2, y2)
            boxes_4d = torch.zeros(num_boxes, 4)
            boxes_4d[:, 0] = boxes_5d[:, 0] - boxes_5d[:, 2] / 2.0
            boxes_4d[:, 1] = boxes_5d[:, 1] - boxes_5d[:, 3] / 2.0
            boxes_4d[:, 2] = boxes_5d[:, 0] + boxes_5d[:, 2] / 2.0
            boxes_4d[:, 3] = boxes_5d[:, 1] + boxes_5d[:, 3] / 2.0

            image_size = (500, 600)
            test_boxes_4d = Boxes(boxes_4d)
            test_boxes_5d = RotatedBoxes(boxes_5d)
            # Before clip
            areas_4d = test_boxes_4d.area()
            areas_5d = test_boxes_5d.area()
            self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5))
            # After clip
            test_boxes_4d.clip(image_size)
            test_boxes_5d.clip(image_size)
            areas_4d = test_boxes_4d.area()
            areas_5d = test_boxes_5d.area()
            self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5))

    def test_clip_area_arbitrary_angle(self):
        num_boxes = 100
        boxes_5d = torch.zeros(num_boxes, 5)
        boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
        boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
        boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
        boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
        boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
        clip_angle_threshold = random.uniform(0, 180)

        image_size = (500, 600)
        test_boxes_5d = RotatedBoxes(boxes_5d)
        # Before clip
        areas_before = test_boxes_5d.area()
        # After clip
        test_boxes_5d.clip(image_size, clip_angle_threshold)
        areas_diff = test_boxes_5d.area() - areas_before

        # the areas should only decrease after clipping
        self.assertTrue(torch.all(areas_diff <= 0))
        # whenever the box is clipped (thus the area shrinks),
        # the angle for the box must be within the clip_angle_threshold
        # Note that the clip function will normalize the angle range
        # to be within (-180, 180]
        self.assertTrue(
            torch.all(torch.abs(boxes_5d[:, 4][torch.where(areas_diff < 0)]) < clip_angle_threshold)
        )

    def test_normalize_angles(self):
        # torch.manual_seed(0)
        for _ in range(50):
            num_boxes = 100
            boxes_5d = torch.zeros(num_boxes, 5)
            boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
            boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
            boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
            boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
            boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
            rotated_boxes = RotatedBoxes(boxes_5d)
            normalized_boxes = rotated_boxes.clone()
            normalized_boxes.normalize_angles()
            self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] >= -180))
            self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] < 180))
            # x, y, w, h should not change
            self.assertTrue(torch.allclose(boxes_5d[:, :4], normalized_boxes.tensor[:, :4]))
            # the cos/sin values of the angles should stay the same

            self.assertTrue(
                torch.allclose(
                    torch.cos(boxes_5d[:, 4] * math.pi / 180),
                    torch.cos(normalized_boxes.tensor[:, 4] * math.pi / 180),
                    atol=1e-5,
                )
            )

            self.assertTrue(
                torch.allclose(
                    torch.sin(boxes_5d[:, 4] * math.pi / 180),
                    torch.sin(normalized_boxes.tensor[:, 4] * math.pi / 180),
                    atol=1e-5,
                )
            )

    def test_pairwise_iou_0_degree(self):
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            boxes1 = torch.tensor(
                [[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]],
                dtype=torch.float32,
                device=device,
            )
            boxes2 = torch.tensor(
                [
                    [0.5, 0.5, 1.0, 1.0, 0.0],
                    [0.25, 0.5, 0.5, 1.0, 0.0],
                    [0.5, 0.25, 1.0, 0.5, 0.0],
                    [0.25, 0.25, 0.5, 0.5, 0.0],
                    [0.75, 0.75, 0.5, 0.5, 0.0],
                    [1.0, 1.0, 1.0, 1.0, 0.0],
                ],
                dtype=torch.float32,
                device=device,
            )
            expected_ious = torch.tensor(
                [
                    [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
                    [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
                ],
                dtype=torch.float32,
                device=device,
            )
            ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
            self.assertTrue(torch.allclose(ious, expected_ious))

    def test_pairwise_iou_45_degrees(self):
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            boxes1 = torch.tensor(
                [
                    [1, 1, math.sqrt(2), math.sqrt(2), 45],
                    [1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45],
                ],
                dtype=torch.float32,
                device=device,
            )
            boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32, device=device)
            expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32, device=device)
            ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
            self.assertTrue(torch.allclose(ious, expected_ious))

    def test_pairwise_iou_orthogonal(self):
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            boxes1 = torch.tensor([[5, 5, 10, 6, 55]], dtype=torch.float32, device=device)
            boxes2 = torch.tensor([[5, 5, 10, 6, -35]], dtype=torch.float32, device=device)
            iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0)
            expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
            ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
            self.assertTrue(torch.allclose(ious, expected_ious))

    def test_pairwise_iou_large_close_boxes(self):
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            boxes1 = torch.tensor(
                [[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]],
                dtype=torch.float32,
                device=device,
            )
            boxes2 = torch.tensor(
                [[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]],
                dtype=torch.float32,
                device=device,
            )
            iou = 364.259155 / 364.259186
            expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
            ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
            self.assertTrue(torch.allclose(ious, expected_ious))

    def test_pairwise_iou_many_boxes(self):
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            num_boxes1 = 100
            num_boxes2 = 200
            boxes1 = torch.stack(
                [
                    torch.tensor(
                        [5 + 20 * i, 5 + 20 * i, 10, 10, 0],
                        dtype=torch.float32,
                        device=device,
                    )
                    for i in range(num_boxes1)
                ]
            )
            boxes2 = torch.stack(
                [
                    torch.tensor(
                        [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0],
                        dtype=torch.float32,
                        device=device,
                    )
                    for i in range(num_boxes2)
                ]
            )
            expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32, device=device)
            for i in range(min(num_boxes1, num_boxes2)):
                expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0
            ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
            self.assertTrue(torch.allclose(ious, expected_ious))

    def test_pairwise_iou_issue1207_simplified(self):
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            # Simplified test case of D2-issue-1207
            boxes1 = torch.tensor([[3, 3, 8, 2, -45.0]], device=device)
            boxes2 = torch.tensor([[6, 0, 8, 2, -45.0]], device=device)
            iou = 0.0
            expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)

            ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
            self.assertTrue(torch.allclose(ious, expected_ious))

    def test_pairwise_iou_issue1207(self):
        for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
            # The original test case in D2-issue-1207
            boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device)
            boxes2 = torch.tensor([[190.0, 127.0, 80.0, 21.0, -46.0]], device=device)

            iou = 0.0
            expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)

            ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
            self.assertTrue(torch.allclose(ious, expected_ious))

    def test_empty_cat(self):
        x = RotatedBoxes.cat([])
        self.assertTrue(x.tensor.shape, (0, 5))

    def test_scriptability(self):
        def func(x):
            boxes = RotatedBoxes(x)
            test = boxes.to(torch.device("cpu")).tensor
            return boxes.area(), test

        f = torch.jit.script(func)
        f = reload_script_model(f)
        f(torch.rand((3, 5)))

        data = torch.rand((3, 5))

        def func_cat(x: torch.Tensor):
            boxes1 = RotatedBoxes(x)
            boxes2 = RotatedBoxes(x)
            # this is not supported by torchscript for now.
            # boxes3 = RotatedBoxes.cat([boxes1, boxes2])
            boxes3 = boxes1.cat([boxes1, boxes2])
            return boxes3

        f = torch.jit.script(func_cat)
        script_box = f(data)
        self.assertTrue(torch.equal(torch.cat([data, data]), script_box.tensor))


def benchmark_rotated_iou():
    num_boxes1 = 200
    num_boxes2 = 500
    boxes1 = torch.stack(
        [
            torch.tensor([5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32)
            for i in range(num_boxes1)
        ]
    )
    boxes2 = torch.stack(
        [
            torch.tensor(
                [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0],
                dtype=torch.float32,
            )
            for i in range(num_boxes2)
        ]
    )

    def func(dev, n=1):
        b1 = boxes1.to(device=dev)
        b2 = boxes2.to(device=dev)

        def bench():
            for _ in range(n):
                pairwise_iou_rotated(b1, b2)
            if dev.type == "cuda":
                torch.cuda.synchronize()

        return bench

    # only run it once per timed loop, since it's slow
    args = [{"dev": torch.device("cpu"), "n": 1}]
    if torch.cuda.is_available():
        args.append({"dev": torch.device("cuda"), "n": 10})

    benchmark(func, "rotated_iou", args, warmup_iters=3)


if __name__ == "__main__":
    unittest.main()
    benchmark_rotated_iou()
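
Not part of the deleted file: a minimal sketch of the (cx, cy, w, h, angle-in-degrees) box format and the `pairwise_iou` call these tests use, on the axis-aligned half-overlap case from `test_iou_half_overlap_cpu`:

    import torch
    from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou

    boxes1 = RotatedBoxes(torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]]))   # unit square
    boxes2 = RotatedBoxes(torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]]))  # its left half
    ious = pairwise_iou(boxes1, boxes2)
    assert torch.allclose(ious, torch.tensor([[0.5]]))  # intersection 0.5 / union 1.0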
spaces/Ayakasuki/anime-ai-detect/app.py
DELETED
@@ -1,17 +0,0 @@
import gradio as gr
from transformers import pipeline

detection_pipeline = pipeline("image-classification", "saltacc/anime-ai-detect")


def detect(img):
    print(img)
    output = detection_pipeline(img, top_k=2)
    final = {}
    for d in output:
        final[d["label"]] = d["score"]
    return final


iface = gr.Interface(fn=detect, inputs=gr.Image(type="pil"), outputs=gr.Label(label="result"))
iface.launch()
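
Not part of the deleted file: the same classifier can also be called without the gradio UI; the model id comes from the app above, while the image path is illustrative.

    from PIL import Image
    from transformers import pipeline

    detection_pipeline = pipeline("image-classification", "saltacc/anime-ai-detect")
    img = Image.open("example.png")  # hypothetical input image
    for d in detection_pipeline(img, top_k=2):
        print(d["label"], round(d["score"], 3))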
spaces/Bambicita/rvc-models/infer_pack/commons.py
DELETED
@@ -1,166 +0,0 @@
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q)"""
    kl = (logs_q - logs_p) - 0.5
    kl += (
        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
    )
    return kl


def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protect from overflows."""
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
    return g


def slice_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def slice_segments2(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
        num_timescales - 1
    )
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
    )
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    signal = signal.view(1, channels, length)
    return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    total_norm = total_norm ** (1.0 / norm_type)
    return total_norm
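
Not part of the deleted file: a quick demonstration of two of the helpers above on toy tensors, meant to be run in the same module as those definitions.

    import torch

    lengths = torch.tensor([1, 3, 2])
    mask = sequence_mask(lengths)  # max_length defaults to lengths.max() == 3
    # tensor([[ True, False, False],
    #         [ True,  True,  True],
    #         [ True,  True, False]])

    x = torch.arange(24.0).reshape(2, 3, 4)  # (batch, channels, time)
    ret, ids_str = rand_slice_segments(x, segment_size=2)
    assert ret.shape == (2, 3, 2)  # one random 2-frame slice per batch item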
spaces/Bart92/RVC_HF/tools/infer/train-index.py
DELETED
@@ -1,42 +0,0 @@
"""
Format: cid is used directly as the index's built-in position; aid does not fit,
so it is looked up through a dict instead — there are only about 50k entries anyway.
"""
import os
import logging

logger = logging.getLogger(__name__)

import faiss
import numpy as np

# ########### If starting from raw features, save them first.
inp_root = r"E:\codes\py39\dataset\mi\2-co256"
npys = []
for name in sorted(list(os.listdir(inp_root))):
    phone = np.load("%s/%s" % (inp_root, name))
    npys.append(phone)
big_npy = np.concatenate(npys, 0)
logger.debug(big_npy.shape)  # (6196072, 192)#fp32#4.43G
np.save("infer/big_src_feature_mi.npy", big_npy)

################## train + add
# big_npy=np.load("/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/inference_f0/big_src_feature_mi.npy")
logger.debug(big_npy.shape)
index = faiss.index_factory(256, "IVF512,Flat")  # mi
logger.info("Training...")
index_ivf = faiss.extract_index_ivf(index)  #
index_ivf.nprobe = 9
index.train(big_npy)
faiss.write_index(index, "infer/trained_IVF512_Flat_mi_baseline_src_feat.index")
logger.info("Adding...")
index.add(big_npy)
faiss.write_index(index, "infer/added_IVF512_Flat_mi_baseline_src_feat.index")
"""
Sizes (all FP32):
big_src_feature  2.95G  (3098036, 256)
big_emb          4.43G  (6196072, 192)
big_emb is twice as large because the features are repeated and pitch is appended.
"""
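
Not part of the deleted file: once built, the index is queried with `faiss.read_index` and `Index.search`; the path and query vector below are illustrative.

    import faiss
    import numpy as np

    index = faiss.read_index("infer/added_IVF512_Flat_mi_baseline_src_feat.index")
    index.nprobe = 9  # same probe count used at build time
    query = np.random.rand(1, 256).astype("float32")
    distances, ids = index.search(query, 8)  # 8 nearest stored feature vectors
    print(ids[0], distances[0])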
spaces/Benson/text-generation/Examples/Azul 39s Pistas Joe 39s 3d Scavenger Hunt Descargar.md
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Pistas de Blue Joe’s 3D Scavenger Hunt Descargar: Un juego divertido y educativo para niños en edad preescolar</h1>
|
3 |
-
<p>Si usted está buscando un juego divertido y educativo para su preescolar, es posible que desee comprobar hacia fuera <strong>Pistas de Blue Joe’s 3D Scavenger Hunt</strong>. Este es un juego basado en el popular programa infantil <em>Blue’s Clues</em>, donde puedes unirte a Joe, Blue y Mailbox mientras exploran su vecindario y buscan artículos para su búsqueda del tesoro. El juego está diseñado para niños de 3 a 6 años y les ayuda a desarrollar sus habilidades de resolución de problemas, memoria y coordinación mano-ojo. En este artículo, te diremos cómo descargar y jugar el juego, qué puedes hacer en el juego, por qué deberías jugar el juego y algunas preguntas frecuentes.</p>
|
4 |
-
<h2>Cómo descargar y jugar el juego</h2>
|
5 |
-
<p>Descargar y jugar <strong>Pistas de Blue La búsqueda del tesoro en 3D de Joe es fácil y simple. Solo sigue estos pasos:</p>
|
6 |
-
<h2>azul 39;s pistas joe 39;s 3d scavenger hunt descargar</h2><br /><p><b><b>Download File</b> ✫✫✫ <a href="https://bltlly.com/2v6Jz1">https://bltlly.com/2v6Jz1</a></b></p><br /><br />
|
7 |
-
<ol>
|
8 |
-
<li>Visite el sitio web oficial o la página de Internet Archive. Puedes encontrar el juego en <a href="( 1 )">Shockwave.com</a>, donde puedes descargar una versión de prueba gratuita o comprar la versión completa por $6.99. También puedes encontrar el juego en <a href="( 2 )">Internet Archive</a>, donde puedes descargarlo gratis. </li>
|
9 |
-
<li>Elija su versión preferida y haga clic en el enlace de descarga. Dependiendo de su sistema informático y navegador, es posible que necesite instalar algunos complementos o software para ejecutar el juego. Por ejemplo, puede que necesite instalar <a href="( 6 )">Shockwave Player</a> o <a href="">Adobe Flash Player</a>. </li>
|
10 |
-
<li>Instale e inicie el juego en su computadora. Siga las instrucciones en su pantalla para completar el proceso de instalación. Una vez hecho, puedes abrir el juego desde tu escritorio o menú de inicio. </li>
|
11 |
-
|
12 |
-
</ol>
|
13 |
-
<h2>Qué puedes hacer en el juego</h2>
|
14 |
-
<p>En <strong>Pistas de Blue Joe’s 3D Scavenger Hunt</strong>, puedes hacer muchas actividades divertidas y educativas con Joe, Blue y Mailbox. Estas son algunas de las cosas que puedes hacer en el juego:</p>
|
15 |
-
<ul>
|
16 |
-
<li>Siga las pistas y encontrar los elementos que Joe necesita para su búsqueda del tesoro. Joe te dará una lista de artículos que quiere encontrar en cada lugar, como una bola roja, una flor amarilla o un libro azul. También verá tres huellas de patas en la pantalla, que son las pistas de Blue. Puede hacer clic en ellas para ver lo que Blue quiere encontrar. Puede utilizar su ratón para moverse y buscar los artículos. Cuando encuentre un artículo, puede hacer clic en él para agregarlo a su inventario. También puede utilizar la lupa para acercar y alejar. </li>
|
17 |
-
<li>Interactúa con varios objetos y personajes en el entorno 3D. Puedes hacer clic en cualquier cosa que parezca interesante o curiosa, como árboles, animales, coches o personas. Escucharás sonidos, verás animaciones u obtendrás comentarios de Joe o Blue. También puedes hablar con algunos de los personajes, como Mr. Salt, Mrs. Pepper, Paprika, Slippery Soap, Shovel, Pail y más. Te saludarán, te harán preguntas o te darán pistas. </li>
|
18 |
-
<li>Aprende sobre colores, formas, números, letras, animales y más. El juego está lleno de contenido educativo que ayudará a tu preescolar a aprender cosas nuevas y reforzar lo que ya sabe. Por ejemplo, puedes aprender sobre colores mezclando pinturas en la tienda, formas combinando piezas de rompecabezas en la escuela, números contando manzanas en la granja, letras deletreando palabras en el parque, animales identificando sus sonidos y nombres en la granja, y más. </li>
|
19 |
-
</ul>
|
20 |
-
<h2>Por qué deberías jugar el juego</h2>
|
21 |
-
<p><strong>Pistas de Blue Joe’s 3D Scavenger Hunt</strong> no es solo un juego divertido para niños en edad preescolar, sino también uno beneficioso. Estas son algunas de las razones por las que deberías jugar el juego:</p>
|
22 |
-
<ul>
|
23 |
-
|
24 |
-
<li>Reviews and ratings from other players and parents: The game has received positive reviews and ratings from other players and parents who have tried it. They have praised the game for its graphics, sound effects, voice acting, gameplay, educational value, and entertainment factor. They have also noted that the game is easy to install and play, suitable for different ages and skill levels, and faithful to the show.</li>
<li>Trivia about the game and the show: The game was released in 2002 by <a href="">Nick Jr.</a> and <a href="">Shockwave.com</a> as part of a series of <em>Blue's Clues</em> games for PC. The game features Joe as the host of the series, who replaced Steve in 2000. The game also features Mailbox as Joe's companion, who helps him with his scavenger hunt. The game is based on the episode "Joe's 3D Scavenger Hunt", which aired on September 9, 2002.</li>
</ul>
<h2>Conclusion</h2>
<p>In conclusion, <strong>Blue's Clues: Joe's 3D Scavenger Hunt</strong> is a fun and educational game for preschoolers that you can download and play on your computer. The game lets you join Joe, Blue, and Mailbox as they explore their neighborhood and search for items for their scavenger hunt. It also helps your preschooler develop problem-solving, memory, and hand-eye coordination skills, as well as learn about colors, shapes, numbers, letters, animals, and more. The game has received positive reviews and ratings from other players and parents, and it is based on the episode "Joe's 3D Scavenger Hunt" from the show. If you are interested in playing, you can download it from the official website or the Internet Archive page. We hope you have fun playing and learning with Joe and Blue!</p>
<h2>Frequently Asked Questions</h2>
<p>Here are some of the most frequently asked questions about <strong>Blue's Clues: Joe's 3D Scavenger Hunt</strong>:</p>
<ol>
<li><strong>What are the system requirements for the game?</strong><br>
</li>
<li><strong>How long does the game last?</strong><br>
The game lasts about an hour, depending on how quickly you find the items and clues. You can also replay the game as many times as you like, since the items and clues change each time.</li>
<li><strong>Is the game safe and virus-free?</strong><br>
Yes, the game is safe and virus-free. You can download it from the official website or the Internet Archive page, which are trusted sources. You can also scan the game with your antivirus software before installing it.</li>
<li><strong>Can I play the game online without downloading it?</strong><br>
Yes, you can play the game online without downloading it. You can find the game on <a href="">Nick Jr.</a>, where you can play it for free in your browser. However, you will need Shockwave Player or Adobe Flash Player installed to play the game online.</li>
<li><strong>Where can I find more games like this?</strong><br>
You can find more games like this on <a href="">Shockwave.com</a> or the <a href="">Internet Archive</a>, where you can download or play online other <em>Blue's Clues</em> games for PC, such as <em>Blue's Clues: Blue Takes You to School</em>, <em>Blue's Clues: Blue's Art Time Activities</em>, or <em>Blue's Clues: Blue's Reading Time Activities</em>. You can also find more games on <a href="">Nick Jr.</a>, where you can play online other <em>Blue's Clues</em> games for kids, such as <em>Blue's Clues: Blue's Mix 'n Match Dress Up</em>, <em>Blue's Clues: Blue's Matching Game</em>, or <em>Blue's Clues: Blue's Birthday Adventure</em>.</li>
</ol>
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/metadata_legacy.py
DELETED
@@ -1,74 +0,0 @@
"""Metadata generation logic for legacy source distributions.
"""

import logging
import os

from pip._internal.build_env import BuildEnvironment
from pip._internal.cli.spinners import open_spinner
from pip._internal.exceptions import (
    InstallationError,
    InstallationSubprocessError,
    MetadataGenerationFailed,
)
from pip._internal.utils.setuptools_build import make_setuptools_egg_info_args
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.temp_dir import TempDirectory

logger = logging.getLogger(__name__)


def _find_egg_info(directory: str) -> str:
    """Find an .egg-info subdirectory in `directory`."""
    filenames = [f for f in os.listdir(directory) if f.endswith(".egg-info")]

    if not filenames:
        raise InstallationError(f"No .egg-info directory found in {directory}")

    if len(filenames) > 1:
        raise InstallationError(
            "More than one .egg-info directory found in {}".format(directory)
        )

    return os.path.join(directory, filenames[0])


def generate_metadata(
    build_env: BuildEnvironment,
    setup_py_path: str,
    source_dir: str,
    isolated: bool,
    details: str,
) -> str:
    """Generate metadata using setup.py-based defacto mechanisms.

    Returns the generated metadata directory.
    """
    logger.debug(
        "Running setup.py (path:%s) egg_info for package %s",
        setup_py_path,
        details,
    )

    egg_info_dir = TempDirectory(kind="pip-egg-info", globally_managed=True).path

    args = make_setuptools_egg_info_args(
        setup_py_path,
        egg_info_dir=egg_info_dir,
        no_user_config=isolated,
    )

    with build_env:
        with open_spinner("Preparing metadata (setup.py)") as spinner:
            try:
                call_subprocess(
                    args,
                    cwd=source_dir,
                    command_desc="python setup.py egg_info",
                    spinner=spinner,
                )
            except InstallationSubprocessError as error:
                raise MetadataGenerationFailed(package_details=details) from error

    # Return the .egg-info directory.
    return _find_egg_info(egg_info_dir)
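For orientation, here is a minimal sketch of how this deleted helper would be driven. It is a hypothetical invocation, not pip's own call site: the project paths are placeholders, and calling pip internals like this is unsupported outside pip itself.

# Hypothetical sketch: driving generate_metadata() with pip's internal
# BuildEnvironment. Paths are placeholders; pip normally calls this itself
# from its metadata-preparation step for legacy (setup.py) source dists.
from pip._internal.build_env import BuildEnvironment
from pip._internal.operations.build.metadata_legacy import generate_metadata

build_env = BuildEnvironment()  # no isolation configured in this sketch
metadata_dir = generate_metadata(
    build_env=build_env,
    setup_py_path="/tmp/demo-pkg/setup.py",  # hypothetical project
    source_dir="/tmp/demo-pkg",
    isolated=False,
    details="demo-pkg from /tmp/demo-pkg",
)
print(metadata_dir)  # path to the generated .egg-info directory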
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/msgpack/fallback.py
DELETED
@@ -1,1010 +0,0 @@
"""Fallback pure Python implementation of msgpack"""
from datetime import datetime as _DateTime
import sys
import struct


PY2 = sys.version_info[0] == 2
if PY2:
    int_types = (int, long)

    def dict_iteritems(d):
        return d.iteritems()

else:
    int_types = int
    unicode = str
    xrange = range

    def dict_iteritems(d):
        return d.items()


if sys.version_info < (3, 5):
    # Ugly hack...
    RecursionError = RuntimeError

    def _is_recursionerror(e):
        return (
            len(e.args) == 1
            and isinstance(e.args[0], str)
            and e.args[0].startswith("maximum recursion depth exceeded")
        )

else:

    def _is_recursionerror(e):
        return True


if hasattr(sys, "pypy_version_info"):
    # StringIO is slow on PyPy, StringIO is faster. However: PyPy's own
    # StringBuilder is fastest.
    from __pypy__ import newlist_hint

    try:
        from __pypy__.builders import BytesBuilder as StringBuilder
    except ImportError:
        from __pypy__.builders import StringBuilder
    USING_STRINGBUILDER = True

    class StringIO(object):
        def __init__(self, s=b""):
            if s:
                self.builder = StringBuilder(len(s))
                self.builder.append(s)
            else:
                self.builder = StringBuilder()

        def write(self, s):
            if isinstance(s, memoryview):
                s = s.tobytes()
            elif isinstance(s, bytearray):
                s = bytes(s)
            self.builder.append(s)

        def getvalue(self):
            return self.builder.build()

else:
    USING_STRINGBUILDER = False
    from io import BytesIO as StringIO

    newlist_hint = lambda size: []


from .exceptions import BufferFull, OutOfData, ExtraData, FormatError, StackError

from .ext import ExtType, Timestamp


EX_SKIP = 0
EX_CONSTRUCT = 1
EX_READ_ARRAY_HEADER = 2
EX_READ_MAP_HEADER = 3

TYPE_IMMEDIATE = 0
TYPE_ARRAY = 1
TYPE_MAP = 2
TYPE_RAW = 3
TYPE_BIN = 4
TYPE_EXT = 5

DEFAULT_RECURSE_LIMIT = 511


def _check_type_strict(obj, t, type=type, tuple=tuple):
    if type(t) is tuple:
        return type(obj) in t
    else:
        return type(obj) is t


def _get_data_from_buffer(obj):
    view = memoryview(obj)
    if view.itemsize != 1:
        raise ValueError("cannot unpack from multi-byte object")
    return view


def unpackb(packed, **kwargs):
    """
    Unpack an object from `packed`.

    Raises ``ExtraData`` when *packed* contains extra bytes.
    Raises ``ValueError`` when *packed* is incomplete.
    Raises ``FormatError`` when *packed* is not valid msgpack.
    Raises ``StackError`` when *packed* contains too nested.
    Other exceptions can be raised during unpacking.

    See :class:`Unpacker` for options.
    """
    unpacker = Unpacker(None, max_buffer_size=len(packed), **kwargs)
    unpacker.feed(packed)
    try:
        ret = unpacker._unpack()
    except OutOfData:
        raise ValueError("Unpack failed: incomplete input")
    except RecursionError as e:
        if _is_recursionerror(e):
            raise StackError
        raise
    if unpacker._got_extradata():
        raise ExtraData(ret, unpacker._get_extradata())
    return ret


if sys.version_info < (2, 7, 6):

    def _unpack_from(f, b, o=0):
        """Explicit type cast for legacy struct.unpack_from"""
        return struct.unpack_from(f, bytes(b), o)

else:
    _unpack_from = struct.unpack_from

_NO_FORMAT_USED = ""
_MSGPACK_HEADERS = {
    0xC4: (1, _NO_FORMAT_USED, TYPE_BIN),
    0xC5: (2, ">H", TYPE_BIN),
    0xC6: (4, ">I", TYPE_BIN),
    0xC7: (2, "Bb", TYPE_EXT),
    0xC8: (3, ">Hb", TYPE_EXT),
    0xC9: (5, ">Ib", TYPE_EXT),
    0xCA: (4, ">f"),
    0xCB: (8, ">d"),
    0xCC: (1, _NO_FORMAT_USED),
    0xCD: (2, ">H"),
    0xCE: (4, ">I"),
    0xCF: (8, ">Q"),
    0xD0: (1, "b"),
    0xD1: (2, ">h"),
    0xD2: (4, ">i"),
    0xD3: (8, ">q"),
    0xD4: (1, "b1s", TYPE_EXT),
    0xD5: (2, "b2s", TYPE_EXT),
    0xD6: (4, "b4s", TYPE_EXT),
    0xD7: (8, "b8s", TYPE_EXT),
    0xD8: (16, "b16s", TYPE_EXT),
    0xD9: (1, _NO_FORMAT_USED, TYPE_RAW),
    0xDA: (2, ">H", TYPE_RAW),
    0xDB: (4, ">I", TYPE_RAW),
    0xDC: (2, ">H", TYPE_ARRAY),
    0xDD: (4, ">I", TYPE_ARRAY),
    0xDE: (2, ">H", TYPE_MAP),
    0xDF: (4, ">I", TYPE_MAP),
}


class Unpacker(object):
    """Streaming unpacker.

    Arguments:

    :param file_like:
        File-like object having `.read(n)` method.
        If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.

    :param int read_size:
        Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`)

    :param bool use_list:
        If true, unpack msgpack array to Python list.
        Otherwise, unpack to Python tuple. (default: True)

    :param bool raw:
        If true, unpack msgpack raw to Python bytes.
        Otherwise, unpack to Python str by decoding with UTF-8 encoding (default).

    :param int timestamp:
        Control how timestamp type is unpacked:

            0 - Timestamp
            1 - float (Seconds from the EPOCH)
            2 - int (Nanoseconds from the EPOCH)
            3 - datetime.datetime (UTC). Python 2 is not supported.

    :param bool strict_map_key:
        If true (default), only str or bytes are accepted for map (dict) keys.

    :param callable object_hook:
        When specified, it should be callable.
        Unpacker calls it with a dict argument after unpacking msgpack map.
        (See also simplejson)

    :param callable object_pairs_hook:
        When specified, it should be callable.
        Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
        (See also simplejson)

    :param str unicode_errors:
        The error handler for decoding unicode. (default: 'strict')
        This option should be used only when you have msgpack data which
        contains invalid UTF-8 string.

    :param int max_buffer_size:
        Limits size of data waiting unpacked. 0 means 2**32-1.
        The default value is 100*1024*1024 (100MiB).
        Raises `BufferFull` exception when it is insufficient.
        You should set this parameter when unpacking data from untrusted source.

    :param int max_str_len:
        Deprecated, use *max_buffer_size* instead.
        Limits max length of str. (default: max_buffer_size)

    :param int max_bin_len:
        Deprecated, use *max_buffer_size* instead.
        Limits max length of bin. (default: max_buffer_size)

    :param int max_array_len:
        Limits max length of array.
        (default: max_buffer_size)

    :param int max_map_len:
        Limits max length of map.
        (default: max_buffer_size//2)

    :param int max_ext_len:
        Deprecated, use *max_buffer_size* instead.
        Limits max size of ext type. (default: max_buffer_size)

    Example of streaming deserialize from file-like object::

        unpacker = Unpacker(file_like)
        for o in unpacker:
            process(o)

    Example of streaming deserialize from socket::

        unpacker = Unpacker()
        while True:
            buf = sock.recv(1024**2)
            if not buf:
                break
            unpacker.feed(buf)
            for o in unpacker:
                process(o)

    Raises ``ExtraData`` when *packed* contains extra bytes.
    Raises ``OutOfData`` when *packed* is incomplete.
    Raises ``FormatError`` when *packed* is not valid msgpack.
    Raises ``StackError`` when *packed* contains too nested.
    Other exceptions can be raised during unpacking.
    """

    def __init__(
        self,
        file_like=None,
        read_size=0,
        use_list=True,
        raw=False,
        timestamp=0,
        strict_map_key=True,
        object_hook=None,
        object_pairs_hook=None,
        list_hook=None,
        unicode_errors=None,
        max_buffer_size=100 * 1024 * 1024,
        ext_hook=ExtType,
        max_str_len=-1,
        max_bin_len=-1,
        max_array_len=-1,
        max_map_len=-1,
        max_ext_len=-1,
    ):
        if unicode_errors is None:
            unicode_errors = "strict"

        if file_like is None:
            self._feeding = True
        else:
            if not callable(file_like.read):
                raise TypeError("`file_like.read` must be callable")
            self.file_like = file_like
            self._feeding = False

        #: array of bytes fed.
        self._buffer = bytearray()
        #: Which position we currently reads
        self._buff_i = 0

        # When Unpacker is used as an iterable, between the calls to next(),
        # the buffer is not "consumed" completely, for efficiency sake.
        # Instead, it is done sloppily. To make sure we raise BufferFull at
        # the correct moments, we have to keep track of how sloppy we were.
        # Furthermore, when the buffer is incomplete (that is: in the case
        # we raise an OutOfData) we need to rollback the buffer to the correct
        # state, which _buf_checkpoint records.
        self._buf_checkpoint = 0

        if not max_buffer_size:
            max_buffer_size = 2**31 - 1
        if max_str_len == -1:
            max_str_len = max_buffer_size
        if max_bin_len == -1:
            max_bin_len = max_buffer_size
        if max_array_len == -1:
            max_array_len = max_buffer_size
        if max_map_len == -1:
            max_map_len = max_buffer_size // 2
        if max_ext_len == -1:
            max_ext_len = max_buffer_size

        self._max_buffer_size = max_buffer_size
        if read_size > self._max_buffer_size:
            raise ValueError("read_size must be smaller than max_buffer_size")
        self._read_size = read_size or min(self._max_buffer_size, 16 * 1024)
        self._raw = bool(raw)
        self._strict_map_key = bool(strict_map_key)
        self._unicode_errors = unicode_errors
        self._use_list = use_list
        if not (0 <= timestamp <= 3):
            raise ValueError("timestamp must be 0..3")
        self._timestamp = timestamp
        self._list_hook = list_hook
        self._object_hook = object_hook
        self._object_pairs_hook = object_pairs_hook
        self._ext_hook = ext_hook
        self._max_str_len = max_str_len
        self._max_bin_len = max_bin_len
        self._max_array_len = max_array_len
        self._max_map_len = max_map_len
        self._max_ext_len = max_ext_len
        self._stream_offset = 0

        if list_hook is not None and not callable(list_hook):
            raise TypeError("`list_hook` is not callable")
        if object_hook is not None and not callable(object_hook):
            raise TypeError("`object_hook` is not callable")
        if object_pairs_hook is not None and not callable(object_pairs_hook):
            raise TypeError("`object_pairs_hook` is not callable")
        if object_hook is not None and object_pairs_hook is not None:
            raise TypeError(
                "object_pairs_hook and object_hook are mutually " "exclusive"
            )
        if not callable(ext_hook):
            raise TypeError("`ext_hook` is not callable")

    def feed(self, next_bytes):
        assert self._feeding
        view = _get_data_from_buffer(next_bytes)
        if len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size:
            raise BufferFull

        # Strip buffer before checkpoint before reading file.
        if self._buf_checkpoint > 0:
            del self._buffer[: self._buf_checkpoint]
            self._buff_i -= self._buf_checkpoint
            self._buf_checkpoint = 0

        # Use extend here: INPLACE_ADD += doesn't reliably typecast memoryview in jython
        self._buffer.extend(view)

    def _consume(self):
        """Gets rid of the used parts of the buffer."""
        self._stream_offset += self._buff_i - self._buf_checkpoint
        self._buf_checkpoint = self._buff_i

    def _got_extradata(self):
        return self._buff_i < len(self._buffer)

    def _get_extradata(self):
        return self._buffer[self._buff_i :]

    def read_bytes(self, n):
        ret = self._read(n, raise_outofdata=False)
        self._consume()
        return ret

    def _read(self, n, raise_outofdata=True):
        # (int) -> bytearray
        self._reserve(n, raise_outofdata=raise_outofdata)
        i = self._buff_i
        ret = self._buffer[i : i + n]
        self._buff_i = i + len(ret)
        return ret

    def _reserve(self, n, raise_outofdata=True):
        remain_bytes = len(self._buffer) - self._buff_i - n

        # Fast path: buffer has n bytes already
        if remain_bytes >= 0:
            return

        if self._feeding:
            self._buff_i = self._buf_checkpoint
            raise OutOfData

        # Strip buffer before checkpoint before reading file.
        if self._buf_checkpoint > 0:
            del self._buffer[: self._buf_checkpoint]
            self._buff_i -= self._buf_checkpoint
            self._buf_checkpoint = 0

        # Read from file
        remain_bytes = -remain_bytes
        if remain_bytes + len(self._buffer) > self._max_buffer_size:
            raise BufferFull
        while remain_bytes > 0:
            to_read_bytes = max(self._read_size, remain_bytes)
            read_data = self.file_like.read(to_read_bytes)
            if not read_data:
                break
            assert isinstance(read_data, bytes)
            self._buffer += read_data
            remain_bytes -= len(read_data)

        if len(self._buffer) < n + self._buff_i and raise_outofdata:
            self._buff_i = 0  # rollback
            raise OutOfData

    def _read_header(self):
        typ = TYPE_IMMEDIATE
        n = 0
        obj = None
        self._reserve(1)
        b = self._buffer[self._buff_i]
        self._buff_i += 1
        if b & 0b10000000 == 0:
            obj = b
        elif b & 0b11100000 == 0b11100000:
            obj = -1 - (b ^ 0xFF)
        elif b & 0b11100000 == 0b10100000:
            n = b & 0b00011111
            typ = TYPE_RAW
            if n > self._max_str_len:
                raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len))
            obj = self._read(n)
        elif b & 0b11110000 == 0b10010000:
            n = b & 0b00001111
            typ = TYPE_ARRAY
            if n > self._max_array_len:
                raise ValueError(
                    "%s exceeds max_array_len(%s)" % (n, self._max_array_len)
                )
        elif b & 0b11110000 == 0b10000000:
            n = b & 0b00001111
            typ = TYPE_MAP
            if n > self._max_map_len:
                raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len))
        elif b == 0xC0:
            obj = None
        elif b == 0xC2:
            obj = False
        elif b == 0xC3:
            obj = True
        elif 0xC4 <= b <= 0xC6:
            size, fmt, typ = _MSGPACK_HEADERS[b]
            self._reserve(size)
            if len(fmt) > 0:
                n = _unpack_from(fmt, self._buffer, self._buff_i)[0]
            else:
                n = self._buffer[self._buff_i]
            self._buff_i += size
            if n > self._max_bin_len:
                raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
            obj = self._read(n)
        elif 0xC7 <= b <= 0xC9:
            size, fmt, typ = _MSGPACK_HEADERS[b]
            self._reserve(size)
            L, n = _unpack_from(fmt, self._buffer, self._buff_i)
            self._buff_i += size
            if L > self._max_ext_len:
                raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
            obj = self._read(L)
        elif 0xCA <= b <= 0xD3:
            size, fmt = _MSGPACK_HEADERS[b]
            self._reserve(size)
            if len(fmt) > 0:
                obj = _unpack_from(fmt, self._buffer, self._buff_i)[0]
            else:
                obj = self._buffer[self._buff_i]
            self._buff_i += size
        elif 0xD4 <= b <= 0xD8:
            size, fmt, typ = _MSGPACK_HEADERS[b]
            if self._max_ext_len < size:
                raise ValueError(
                    "%s exceeds max_ext_len(%s)" % (size, self._max_ext_len)
                )
            self._reserve(size + 1)
            n, obj = _unpack_from(fmt, self._buffer, self._buff_i)
            self._buff_i += size + 1
        elif 0xD9 <= b <= 0xDB:
            size, fmt, typ = _MSGPACK_HEADERS[b]
            self._reserve(size)
            if len(fmt) > 0:
                (n,) = _unpack_from(fmt, self._buffer, self._buff_i)
            else:
                n = self._buffer[self._buff_i]
            self._buff_i += size
            if n > self._max_str_len:
                raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len))
            obj = self._read(n)
        elif 0xDC <= b <= 0xDD:
            size, fmt, typ = _MSGPACK_HEADERS[b]
            self._reserve(size)
            (n,) = _unpack_from(fmt, self._buffer, self._buff_i)
            self._buff_i += size
            if n > self._max_array_len:
                raise ValueError(
                    "%s exceeds max_array_len(%s)" % (n, self._max_array_len)
                )
        elif 0xDE <= b <= 0xDF:
            size, fmt, typ = _MSGPACK_HEADERS[b]
            self._reserve(size)
            (n,) = _unpack_from(fmt, self._buffer, self._buff_i)
            self._buff_i += size
            if n > self._max_map_len:
                raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len))
        else:
            raise FormatError("Unknown header: 0x%x" % b)
        return typ, n, obj

    def _unpack(self, execute=EX_CONSTRUCT):
        typ, n, obj = self._read_header()

        if execute == EX_READ_ARRAY_HEADER:
            if typ != TYPE_ARRAY:
                raise ValueError("Expected array")
            return n
        if execute == EX_READ_MAP_HEADER:
            if typ != TYPE_MAP:
                raise ValueError("Expected map")
            return n
        # TODO should we eliminate the recursion?
        if typ == TYPE_ARRAY:
            if execute == EX_SKIP:
                for i in xrange(n):
                    # TODO check whether we need to call `list_hook`
                    self._unpack(EX_SKIP)
                return
            ret = newlist_hint(n)
            for i in xrange(n):
                ret.append(self._unpack(EX_CONSTRUCT))
            if self._list_hook is not None:
                ret = self._list_hook(ret)
            # TODO is the interaction between `list_hook` and `use_list` ok?
            return ret if self._use_list else tuple(ret)
        if typ == TYPE_MAP:
            if execute == EX_SKIP:
                for i in xrange(n):
                    # TODO check whether we need to call hooks
                    self._unpack(EX_SKIP)
                    self._unpack(EX_SKIP)
                return
            if self._object_pairs_hook is not None:
                ret = self._object_pairs_hook(
                    (self._unpack(EX_CONSTRUCT), self._unpack(EX_CONSTRUCT))
                    for _ in xrange(n)
                )
            else:
                ret = {}
                for _ in xrange(n):
                    key = self._unpack(EX_CONSTRUCT)
                    if self._strict_map_key and type(key) not in (unicode, bytes):
                        raise ValueError(
                            "%s is not allowed for map key" % str(type(key))
                        )
                    if not PY2 and type(key) is str:
                        key = sys.intern(key)
                    ret[key] = self._unpack(EX_CONSTRUCT)
                if self._object_hook is not None:
                    ret = self._object_hook(ret)
            return ret
        if execute == EX_SKIP:
            return
        if typ == TYPE_RAW:
            if self._raw:
                obj = bytes(obj)
            else:
                obj = obj.decode("utf_8", self._unicode_errors)
            return obj
        if typ == TYPE_BIN:
            return bytes(obj)
        if typ == TYPE_EXT:
            if n == -1:  # timestamp
                ts = Timestamp.from_bytes(bytes(obj))
                if self._timestamp == 1:
                    return ts.to_unix()
                elif self._timestamp == 2:
                    return ts.to_unix_nano()
                elif self._timestamp == 3:
                    return ts.to_datetime()
                else:
                    return ts
            else:
                return self._ext_hook(n, bytes(obj))
        assert typ == TYPE_IMMEDIATE
        return obj

    def __iter__(self):
        return self

    def __next__(self):
        try:
            ret = self._unpack(EX_CONSTRUCT)
            self._consume()
            return ret
        except OutOfData:
            self._consume()
            raise StopIteration
        except RecursionError:
            raise StackError

    next = __next__

    def skip(self):
        self._unpack(EX_SKIP)
        self._consume()

    def unpack(self):
        try:
            ret = self._unpack(EX_CONSTRUCT)
        except RecursionError:
            raise StackError
        self._consume()
        return ret

    def read_array_header(self):
        ret = self._unpack(EX_READ_ARRAY_HEADER)
        self._consume()
        return ret

    def read_map_header(self):
        ret = self._unpack(EX_READ_MAP_HEADER)
        self._consume()
        return ret

    def tell(self):
        return self._stream_offset


class Packer(object):
    """
    MessagePack Packer

    Usage::

        packer = Packer()
        astream.write(packer.pack(a))
        astream.write(packer.pack(b))

    Packer's constructor has some keyword arguments:

    :param callable default:
        Convert user type to builtin type that Packer supports.
        See also simplejson's document.

    :param bool use_single_float:
        Use single precision float type for float. (default: False)

    :param bool autoreset:
        Reset buffer after each pack and return its content as `bytes`. (default: True).
        If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.

    :param bool use_bin_type:
        Use bin type introduced in msgpack spec 2.0 for bytes.
        It also enables str8 type for unicode. (default: True)

    :param bool strict_types:
        If set to true, types will be checked to be exact. Derived classes
        from serializable types will not be serialized and will be
        treated as unsupported type and forwarded to default.
        Additionally tuples will not be serialized as lists.
        This is useful when trying to implement accurate serialization
        for python types.

    :param bool datetime:
        If set to true, datetime with tzinfo is packed into Timestamp type.
        Note that the tzinfo is stripped in the timestamp.
        You can get UTC datetime with `timestamp=3` option of the Unpacker.
        (Python 2 is not supported).

    :param str unicode_errors:
        The error handler for encoding unicode. (default: 'strict')
        DO NOT USE THIS!! This option is kept for very specific usage.

    Example of streaming deserialize from file-like object::

        unpacker = Unpacker(file_like)
        for o in unpacker:
            process(o)

    Example of streaming deserialize from socket::

        unpacker = Unpacker()
        while True:
            buf = sock.recv(1024**2)
            if not buf:
                break
            unpacker.feed(buf)
            for o in unpacker:
                process(o)

    Raises ``ExtraData`` when *packed* contains extra bytes.
    Raises ``OutOfData`` when *packed* is incomplete.
    Raises ``FormatError`` when *packed* is not valid msgpack.
    Raises ``StackError`` when *packed* contains too nested.
    Other exceptions can be raised during unpacking.
    """

    def __init__(
        self,
        default=None,
        use_single_float=False,
        autoreset=True,
        use_bin_type=True,
        strict_types=False,
        datetime=False,
        unicode_errors=None,
    ):
        self._strict_types = strict_types
        self._use_float = use_single_float
        self._autoreset = autoreset
        self._use_bin_type = use_bin_type
        self._buffer = StringIO()
        if PY2 and datetime:
            raise ValueError("datetime is not supported in Python 2")
        self._datetime = bool(datetime)
        self._unicode_errors = unicode_errors or "strict"
        if default is not None:
            if not callable(default):
                raise TypeError("default must be callable")
        self._default = default

    def _pack(
        self,
        obj,
        nest_limit=DEFAULT_RECURSE_LIMIT,
        check=isinstance,
        check_type_strict=_check_type_strict,
    ):
        default_used = False
        if self._strict_types:
            check = check_type_strict
            list_types = list
        else:
            list_types = (list, tuple)
        while True:
            if nest_limit < 0:
                raise ValueError("recursion limit exceeded")
            if obj is None:
                return self._buffer.write(b"\xc0")
            if check(obj, bool):
                if obj:
                    return self._buffer.write(b"\xc3")
                return self._buffer.write(b"\xc2")
            if check(obj, int_types):
                if 0 <= obj < 0x80:
                    return self._buffer.write(struct.pack("B", obj))
                if -0x20 <= obj < 0:
                    return self._buffer.write(struct.pack("b", obj))
                if 0x80 <= obj <= 0xFF:
                    return self._buffer.write(struct.pack("BB", 0xCC, obj))
                if -0x80 <= obj < 0:
                    return self._buffer.write(struct.pack(">Bb", 0xD0, obj))
                if 0xFF < obj <= 0xFFFF:
                    return self._buffer.write(struct.pack(">BH", 0xCD, obj))
                if -0x8000 <= obj < -0x80:
                    return self._buffer.write(struct.pack(">Bh", 0xD1, obj))
                if 0xFFFF < obj <= 0xFFFFFFFF:
                    return self._buffer.write(struct.pack(">BI", 0xCE, obj))
                if -0x80000000 <= obj < -0x8000:
                    return self._buffer.write(struct.pack(">Bi", 0xD2, obj))
                if 0xFFFFFFFF < obj <= 0xFFFFFFFFFFFFFFFF:
                    return self._buffer.write(struct.pack(">BQ", 0xCF, obj))
                if -0x8000000000000000 <= obj < -0x80000000:
                    return self._buffer.write(struct.pack(">Bq", 0xD3, obj))
                if not default_used and self._default is not None:
                    obj = self._default(obj)
                    default_used = True
                    continue
                raise OverflowError("Integer value out of range")
            if check(obj, (bytes, bytearray)):
                n = len(obj)
                if n >= 2**32:
                    raise ValueError("%s is too large" % type(obj).__name__)
                self._pack_bin_header(n)
                return self._buffer.write(obj)
            if check(obj, unicode):
                obj = obj.encode("utf-8", self._unicode_errors)
                n = len(obj)
                if n >= 2**32:
                    raise ValueError("String is too large")
                self._pack_raw_header(n)
                return self._buffer.write(obj)
            if check(obj, memoryview):
                n = obj.nbytes
                if n >= 2**32:
                    raise ValueError("Memoryview is too large")
                self._pack_bin_header(n)
                return self._buffer.write(obj)
            if check(obj, float):
                if self._use_float:
                    return self._buffer.write(struct.pack(">Bf", 0xCA, obj))
                return self._buffer.write(struct.pack(">Bd", 0xCB, obj))
            if check(obj, (ExtType, Timestamp)):
                if check(obj, Timestamp):
                    code = -1
                    data = obj.to_bytes()
                else:
                    code = obj.code
                    data = obj.data
                assert isinstance(code, int)
                assert isinstance(data, bytes)
                L = len(data)
                if L == 1:
                    self._buffer.write(b"\xd4")
                elif L == 2:
                    self._buffer.write(b"\xd5")
                elif L == 4:
                    self._buffer.write(b"\xd6")
                elif L == 8:
                    self._buffer.write(b"\xd7")
                elif L == 16:
                    self._buffer.write(b"\xd8")
                elif L <= 0xFF:
                    self._buffer.write(struct.pack(">BB", 0xC7, L))
                elif L <= 0xFFFF:
                    self._buffer.write(struct.pack(">BH", 0xC8, L))
                else:
                    self._buffer.write(struct.pack(">BI", 0xC9, L))
                self._buffer.write(struct.pack("b", code))
                self._buffer.write(data)
                return
            if check(obj, list_types):
                n = len(obj)
                self._pack_array_header(n)
                for i in xrange(n):
                    self._pack(obj[i], nest_limit - 1)
                return
            if check(obj, dict):
                return self._pack_map_pairs(
                    len(obj), dict_iteritems(obj), nest_limit - 1
                )

            if self._datetime and check(obj, _DateTime) and obj.tzinfo is not None:
                obj = Timestamp.from_datetime(obj)
                default_used = 1
                continue

            if not default_used and self._default is not None:
                obj = self._default(obj)
                default_used = 1
                continue

            if self._datetime and check(obj, _DateTime):
                raise ValueError("Cannot serialize %r where tzinfo=None" % (obj,))

            raise TypeError("Cannot serialize %r" % (obj,))

    def pack(self, obj):
        try:
            self._pack(obj)
        except:
            self._buffer = StringIO()  # force reset
            raise
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_map_pairs(self, pairs):
        self._pack_map_pairs(len(pairs), pairs)
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_array_header(self, n):
        if n >= 2**32:
            raise ValueError
        self._pack_array_header(n)
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_map_header(self, n):
        if n >= 2**32:
            raise ValueError
        self._pack_map_header(n)
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_ext_type(self, typecode, data):
        if not isinstance(typecode, int):
            raise TypeError("typecode must have int type.")
        if not 0 <= typecode <= 127:
            raise ValueError("typecode should be 0-127")
        if not isinstance(data, bytes):
            raise TypeError("data must have bytes type")
        L = len(data)
        if L > 0xFFFFFFFF:
            raise ValueError("Too large data")
        if L == 1:
            self._buffer.write(b"\xd4")
        elif L == 2:
            self._buffer.write(b"\xd5")
        elif L == 4:
            self._buffer.write(b"\xd6")
        elif L == 8:
            self._buffer.write(b"\xd7")
        elif L == 16:
            self._buffer.write(b"\xd8")
        elif L <= 0xFF:
            self._buffer.write(b"\xc7" + struct.pack("B", L))
        elif L <= 0xFFFF:
            self._buffer.write(b"\xc8" + struct.pack(">H", L))
        else:
            self._buffer.write(b"\xc9" + struct.pack(">I", L))
        self._buffer.write(struct.pack("B", typecode))
        self._buffer.write(data)

    def _pack_array_header(self, n):
        if n <= 0x0F:
            return self._buffer.write(struct.pack("B", 0x90 + n))
        if n <= 0xFFFF:
            return self._buffer.write(struct.pack(">BH", 0xDC, n))
        if n <= 0xFFFFFFFF:
            return self._buffer.write(struct.pack(">BI", 0xDD, n))
        raise ValueError("Array is too large")

    def _pack_map_header(self, n):
        if n <= 0x0F:
            return self._buffer.write(struct.pack("B", 0x80 + n))
        if n <= 0xFFFF:
            return self._buffer.write(struct.pack(">BH", 0xDE, n))
        if n <= 0xFFFFFFFF:
            return self._buffer.write(struct.pack(">BI", 0xDF, n))
        raise ValueError("Dict is too large")

    def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
        self._pack_map_header(n)
        for (k, v) in pairs:
            self._pack(k, nest_limit - 1)
            self._pack(v, nest_limit - 1)

    def _pack_raw_header(self, n):
        if n <= 0x1F:
            self._buffer.write(struct.pack("B", 0xA0 + n))
        elif self._use_bin_type and n <= 0xFF:
            self._buffer.write(struct.pack(">BB", 0xD9, n))
        elif n <= 0xFFFF:
            self._buffer.write(struct.pack(">BH", 0xDA, n))
        elif n <= 0xFFFFFFFF:
            self._buffer.write(struct.pack(">BI", 0xDB, n))
        else:
            raise ValueError("Raw is too large")

    def _pack_bin_header(self, n):
        if not self._use_bin_type:
            return self._pack_raw_header(n)
        elif n <= 0xFF:
            return self._buffer.write(struct.pack(">BB", 0xC4, n))
        elif n <= 0xFFFF:
            return self._buffer.write(struct.pack(">BH", 0xC5, n))
        elif n <= 0xFFFFFFFF:
            return self._buffer.write(struct.pack(">BI", 0xC6, n))
        else:
            raise ValueError("Bin is too large")

    def bytes(self):
        """Return internal buffer contents as bytes object"""
        return self._buffer.getvalue()

    def reset(self):
        """Reset internal buffer.

        This method is useful only when autoreset=False.
        """
        self._buffer = StringIO()

    def getbuffer(self):
        """Return view of internal buffer."""
        if USING_STRINGBUILDER or PY2:
            return memoryview(self.bytes())
        else:
            return self._buffer.getbuffer()
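As a quick companion to the streaming API documented in the Unpacker docstring above, here is a minimal sketch using the public msgpack package (the vendored pip._vendor copy is not meant to be imported directly). It mirrors the feed()-based socket example from the docstring.

# Minimal sketch of the streaming pack/unpack round trip.
import msgpack

packer = msgpack.Packer()
chunks = packer.pack({"id": 1}) + packer.pack([1, 2, 3])  # two concatenated messages

unpacker = msgpack.Unpacker(raw=False)
unpacker.feed(chunks)        # feed bytes as they arrive, e.g. from a socket
for obj in unpacker:         # iteration yields each complete object
    print(obj)               # {'id': 1} then [1, 2, 3]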
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/_structures.py
DELETED
@@ -1,61 +0,0 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.


class InfinityType:
    def __repr__(self) -> str:
        return "Infinity"

    def __hash__(self) -> int:
        return hash(repr(self))

    def __lt__(self, other: object) -> bool:
        return False

    def __le__(self, other: object) -> bool:
        return False

    def __eq__(self, other: object) -> bool:
        return isinstance(other, self.__class__)

    def __gt__(self, other: object) -> bool:
        return True

    def __ge__(self, other: object) -> bool:
        return True

    def __neg__(self: object) -> "NegativeInfinityType":
        return NegativeInfinity


Infinity = InfinityType()


class NegativeInfinityType:
    def __repr__(self) -> str:
        return "-Infinity"

    def __hash__(self) -> int:
        return hash(repr(self))

    def __lt__(self, other: object) -> bool:
        return True

    def __le__(self, other: object) -> bool:
        return True

    def __eq__(self, other: object) -> bool:
        return isinstance(other, self.__class__)

    def __gt__(self, other: object) -> bool:
        return False

    def __ge__(self, other: object) -> bool:
        return False

    def __neg__(self: object) -> InfinityType:
        return Infinity


NegativeInfinity = NegativeInfinityType()
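These two sentinels compare above and below every other value, which is how packaging uses them when building version sort keys (for example, ordering pre- and post-releases). A self-contained sketch of the comparison behavior, using a trimmed reimplementation rather than the private vendored module:

# Sketch of the extreme-sentinel pattern: rich comparisons make the object
# sort above everything. Trimmed to the two methods the demo needs.
class InfinityType:
    def __repr__(self): return "Infinity"
    def __lt__(self, other): return False   # never less than anything
    def __gt__(self, other): return True    # always greater

Infinity = InfinityType()

print(Infinity > 10**100)            # True: greater than any number
print(sorted([3, Infinity, 1])[-1])  # Infinity sorts last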
spaces/Boilin/URetinex-Net/utils.py
DELETED
@@ -1,113 +0,0 @@
import torchvision
from torch.nn import init
import numpy as np
import os
import time
import torch
from PIL import Image
import glob

def save_TensorImg(img_tensor, path, nrow=1):
    torchvision.utils.save_image(img_tensor, path, nrow=nrow)

def np_save_TensorImg(img_tensor, path):
    img = np.squeeze(img_tensor.cpu().permute(0, 2, 3, 1).numpy())
    im = Image.fromarray(np.clip(img*255, 0, 255.0).astype('uint8'))
    # print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
    # print(type(im))
    # print(im)
    im.save(path, 'png')


# This function is copied from the one above; its purpose is to return the image as a numpy array
def result_for_gradio(img_tensor):
    img = np.squeeze(img_tensor.cpu().permute(0, 2, 3, 1).numpy())
    # print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')

    im = np.clip(img*255, 0, 255.0).astype('uint8')
    # print(im)
    # im = Image.fromarray(np.clip(img*255, 0, 255.0).astype('uint8'))
    return im

def define_modelR(opts):
    if opts.R_model == "HalfDnCNNSE":
        from network.restoration import HalfDnCNNSE
        model_R = HalfDnCNNSE(opts)
    return model_R

def define_modelL(opts):
    if opts.L_model == "Illumination_Alone":
        from network.illumination_enhance import Illumination_Alone
        model_L = Illumination_Alone(opts)
    return model_L

def define_modelA(opts):
    if opts.A_model == "naive":
        from network.illumination_adjustment import Adjust_naive
        model_A = Adjust_naive(opts)
    return model_A



def load_initialize(model, decom_model_path):
    if os.path.exists(decom_model_path):
        # torch.load with map_location=torch.device('cpu')
        checkpoint_Decom_low = torch.load(decom_model_path, map_location='cpu')
        model.load_state_dict(checkpoint_Decom_low['state_dict']['model_R'])
        # to freeze the params of Decomposition Model
        for param in model.parameters():
            param.requires_grad = False
        return model
    else:
        print("pretrained Initialize Model does not exist, check ---> %s " % decom_model_path)
        exit()

def load_unfolding(unfolding_model_path):
    if os.path.exists(unfolding_model_path):
        checkpoint = torch.load(unfolding_model_path, map_location='cpu')
        old_opts = checkpoint["opts"]
        model_R = define_modelR(old_opts)
        model_L = define_modelL(old_opts)
        model_R.load_state_dict(checkpoint['state_dict']['model_R'])
        model_L.load_state_dict(checkpoint['state_dict']['model_L'])
        for param_R in model_R.parameters():
            param_R.requires_grad = False
        for param_L in model_L.parameters():
            param_L.requires_grad = False
        return old_opts, model_R, model_L
    else:
        print("pretrained Unfolding Model does not exist, check ---> %s" % unfolding_model_path)
        exit()

def load_adjustment(adjust_model_path):
    if os.path.exists(adjust_model_path):
        checkpoint_Adjust = torch.load(adjust_model_path, map_location='cpu')
        model_A = define_modelA(checkpoint_Adjust['opts'])
        model_A.load_state_dict(checkpoint_Adjust['state_dict']['model_A'])
        print(" ===========> loading pretrained Illumination Adjustment Model from: %s " % adjust_model_path)
        # to freeze the params of Decomposition Model
        for param in model_A.parameters():
            param.requires_grad = False
        return model_A
    else:
        print("pretrained Adjustment Model does not exist, check ---> %s" % adjust_model_path)
        exit()



def param_all(model, net_input):
    import torchsummary
    shape = net_input.shape
    torchsummary.summary(model, (shape[1], shape[2], shape[3]))

def param_self_compute(model):
    parmas = 0
    for p in model.parameters():
        #print(p)
        parmas += p.numel()
    return parmas
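For context, a minimal sketch of the tensor-to-numpy conversion that result_for_gradio() performs. It assumes you are inside the URetinex-Net repo so `utils` is importable, and it substitutes a random tensor for the real network output.

# Sketch: converting a (1, C, H, W) model-output tensor into the uint8 HWC
# numpy image that Gradio displays.
import torch
from utils import result_for_gradio  # assumes the URetinex-Net repo is on sys.path

fake_output = torch.rand(1, 3, 64, 64)  # values in [0, 1], standing in for a model output
img = result_for_gradio(fake_output)
print(img.shape, img.dtype)             # (64, 64, 3) uint8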
spaces/CVPR/LIVE/pybind11/include/pybind11/functional.h
DELETED
@@ -1,101 +0,0 @@
/*
    pybind11/functional.h: std::function<> support

    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "pybind11.h"
#include <functional>

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)

template <typename Return, typename... Args>
struct type_caster<std::function<Return(Args...)>> {
    using type = std::function<Return(Args...)>;
    using retval_type = conditional_t<std::is_same<Return, void>::value, void_type, Return>;
    using function_type = Return (*) (Args...);

public:
    bool load(handle src, bool convert) {
        if (src.is_none()) {
            // Defer accepting None to other overloads (if we aren't in convert mode):
            if (!convert) return false;
            return true;
        }

        if (!isinstance<function>(src))
            return false;

        auto func = reinterpret_borrow<function>(src);

        /*
           When passing a C++ function as an argument to another C++
           function via Python, every function call would normally involve
           a full C++ -> Python -> C++ roundtrip, which can be prohibitive.
           Here, we try to at least detect the case where the function is
           stateless (i.e. function pointer or lambda function without
           captured variables), in which case the roundtrip can be avoided.
         */
        if (auto cfunc = func.cpp_function()) {
            auto c = reinterpret_borrow<capsule>(PyCFunction_GET_SELF(cfunc.ptr()));
            auto rec = (function_record *) c;

            if (rec && rec->is_stateless &&
                    same_type(typeid(function_type), *reinterpret_cast<const std::type_info *>(rec->data[1]))) {
                struct capture { function_type f; };
                value = ((capture *) &rec->data)->f;
                return true;
            }
        }

        // ensure GIL is held during functor destruction
        struct func_handle {
            function f;
            func_handle(function&& f_) : f(std::move(f_)) {}
            func_handle(const func_handle&) = default;
            ~func_handle() {
                gil_scoped_acquire acq;
                function kill_f(std::move(f));
            }
        };

        // to emulate 'move initialization capture' in C++11
        struct func_wrapper {
            func_handle hfunc;
            func_wrapper(func_handle&& hf): hfunc(std::move(hf)) {}
            Return operator()(Args... args) const {
                gil_scoped_acquire acq;
                object retval(hfunc.f(std::forward<Args>(args)...));
                /* Visual studio 2015 parser issue: need parentheses around this expression */
                return (retval.template cast<Return>());
            }
        };

        value = func_wrapper(func_handle(std::move(func)));
        return true;
    }

    template <typename Func>
    static handle cast(Func &&f_, return_value_policy policy, handle /* parent */) {
        if (!f_)
            return none().inc_ref();

        auto result = f_.template target<function_type>();
        if (result)
            return cpp_function(*result, policy).release();
        else
            return cpp_function(std::forward<Func>(f_), policy).release();
    }

    PYBIND11_TYPE_CASTER(type, _("Callable[[") + concat(make_caster<Args>::name...) + _("], ")
                               + make_caster<retval_type>::name + _("]"));
};

PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
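A sketch of what this caster enables from the Python side: any Python callable can be passed where a bound C++ function expects a std::function. The `example` module here is hypothetical; its C++ side would contain a binding along the lines of the comment below.

# Hypothetical Python-side usage of the std::function caster.
# Assumed C++ binding in the imagined "example" module:
#   m.def("apply", [](const std::function<int(int)> &f, int x) { return f(x); });
import example  # hypothetical compiled pybind11 module

def double(x):
    return 2 * x

print(example.apply(double, 21))           # 42: Python callable -> std::function
print(example.apply(lambda x: x + 1, 41))  # lambdas are converted the same way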
spaces/CVPR/LIVE/pybind11/tests/test_constants_and_functions.cpp
DELETED
@@ -1,127 +0,0 @@
/*
    tests/test_constants_and_functions.cpp -- global constants and functions, enumerations, raw byte strings

    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#include "pybind11_tests.h"

enum MyEnum { EFirstEntry = 1, ESecondEntry };

std::string test_function1() {
    return "test_function()";
}

std::string test_function2(MyEnum k) {
    return "test_function(enum=" + std::to_string(k) + ")";
}

std::string test_function3(int i) {
    return "test_function(" + std::to_string(i) + ")";
}

py::str test_function4()           { return "test_function()"; }
py::str test_function4(char *)     { return "test_function(char *)"; }
py::str test_function4(int, float) { return "test_function(int, float)"; }
py::str test_function4(float, int) { return "test_function(float, int)"; }

py::bytes return_bytes() {
    const char *data = "\x01\x00\x02\x00";
    return std::string(data, 4);
}

std::string print_bytes(py::bytes bytes) {
    std::string ret = "bytes[";
    const auto value = static_cast<std::string>(bytes);
    for (size_t i = 0; i < value.length(); ++i) {
        ret += std::to_string(static_cast<int>(value[i])) + " ";
    }
    ret.back() = ']';
    return ret;
}

// Test that we properly handle C++17 exception specifiers (which are part of the function signature
// in C++17). These should all still work before C++17, but don't affect the function signature.
namespace test_exc_sp {
int f1(int x) noexcept { return x+1; }
int f2(int x) noexcept(true) { return x+2; }
int f3(int x) noexcept(false) { return x+3; }
#if defined(__GNUG__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wdeprecated"
#endif
int f4(int x) throw() { return x+4; } // Deprecated equivalent to noexcept(true)
#if defined(__GNUG__)
#  pragma GCC diagnostic pop
#endif
struct C {
    int m1(int x) noexcept { return x-1; }
    int m2(int x) const noexcept { return x-2; }
    int m3(int x) noexcept(true) { return x-3; }
    int m4(int x) const noexcept(true) { return x-4; }
    int m5(int x) noexcept(false) { return x-5; }
    int m6(int x) const noexcept(false) { return x-6; }
#if defined(__GNUG__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wdeprecated"
#endif
    int m7(int x) throw() { return x-7; }
    int m8(int x) const throw() { return x-8; }
#if defined(__GNUG__)
#  pragma GCC diagnostic pop
#endif
};
}


TEST_SUBMODULE(constants_and_functions, m) {
    // test_constants
    m.attr("some_constant") = py::int_(14);

    // test_function_overloading
    m.def("test_function", &test_function1);
    m.def("test_function", &test_function2);
    m.def("test_function", &test_function3);

#if defined(PYBIND11_OVERLOAD_CAST)
    m.def("test_function", py::overload_cast<>(&test_function4));
    m.def("test_function", py::overload_cast<char *>(&test_function4));
    m.def("test_function", py::overload_cast<int, float>(&test_function4));
    m.def("test_function", py::overload_cast<float, int>(&test_function4));
#else
    m.def("test_function", static_cast<py::str (*)()>(&test_function4));
    m.def("test_function", static_cast<py::str (*)(char *)>(&test_function4));
    m.def("test_function", static_cast<py::str (*)(int, float)>(&test_function4));
    m.def("test_function", static_cast<py::str (*)(float, int)>(&test_function4));
#endif

    py::enum_<MyEnum>(m, "MyEnum")
        .value("EFirstEntry", EFirstEntry)
        .value("ESecondEntry", ESecondEntry)
        .export_values();

    // test_bytes
    m.def("return_bytes", &return_bytes);
    m.def("print_bytes", &print_bytes);

    // test_exception_specifiers
    using namespace test_exc_sp;
    py::class_<C>(m, "C")
        .def(py::init<>())
        .def("m1", &C::m1)
        .def("m2", &C::m2)
        .def("m3", &C::m3)
        .def("m4", &C::m4)
        .def("m5", &C::m5)
        .def("m6", &C::m6)
        .def("m7", &C::m7)
        .def("m8", &C::m8)
        ;
    m.def("f1", f1);
    m.def("f2", f2);
    m.def("f3", f3);
    m.def("f4", f4);
}
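
For reference, a sketch of how these bindings behave from Python, modeled on pybind11's test layout (the exact import path depends on how the test module was built):

# Sketch of the Python side of the bindings above (import path may vary).
from pybind11_tests import constants_and_functions as m

assert m.some_constant == 14

# Overloads are tried in registration order: the int overload catches 7,
# and the enum overload catches MyEnum values.
assert m.test_function() == "test_function()"
assert m.test_function(7) == "test_function(7)"
assert m.test_function(m.MyEnum.ESecondEntry) == "test_function(enum=2)"
assert m.test_function(1, 1.0) == "test_function(int, float)"

assert m.return_bytes() == b"\x01\x00\x02\x00"
assert m.print_bytes(b"\x01\x02") == "bytes[1 2]"
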
spaces/CVPR/Text2Human/Text2Human/ui_demo.py
DELETED
@@ -1,285 +0,0 @@
import sys

import cv2
import numpy as np
import torch
from PIL import Image
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *

from models.sample_model import SampleFromPoseModel
from ui.mouse_event import GraphicsScene
from ui.ui import Ui_Form
from utils.language_utils import (generate_shape_attributes,
                                  generate_texture_attributes)
from utils.options import dict_to_nonedict, parse

color_list = [(0, 0, 0), (255, 250, 250), (220, 220, 220), (250, 235, 215),
              (255, 250, 205), (211, 211, 211), (70, 130, 180),
              (127, 255, 212), (0, 100, 0), (50, 205, 50), (255, 255, 0),
              (245, 222, 179), (255, 140, 0), (255, 0, 0), (16, 78, 139),
              (144, 238, 144), (50, 205, 174), (50, 155, 250), (160, 140, 88),
              (213, 140, 88), (90, 140, 90), (185, 210, 205), (130, 165, 180),
              (225, 141, 151)]


class Ex(QWidget, Ui_Form):

    def __init__(self, opt):
        super(Ex, self).__init__()
        self.setupUi(self)
        self.show()

        self.output_img = None

        self.mat_img = None

        self.mode = 0
        self.size = 6
        self.mask = None
        self.mask_m = None
        self.img = None

        # about UI
        self.mouse_clicked = False
        self.scene = QGraphicsScene()
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.ref_scene = GraphicsScene(self.mode, self.size)
        self.graphicsView_2.setScene(self.ref_scene)
        self.graphicsView_2.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_3.setScene(self.result_scene)
        self.graphicsView_3.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.graphicsView_3.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_3.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)
        self.color = None

        self.sample_model = SampleFromPoseModel(opt)

    def open_densepose(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  QDir.currentPath())
        if fileName:
            image = QPixmap(fileName)
            mat_img = Image.open(fileName)
            self.pose_img = mat_img.copy()
            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return
            image = image.scaled(self.graphicsView.size(),
                                 Qt.IgnoreAspectRatio)

            if len(self.scene.items()) > 0:
                self.scene.removeItem(self.scene.items()[-1])
            self.scene.addPixmap(image)

            self.ref_scene.clear()
            self.result_scene.clear()

            # load pose to model
            self.pose_img = np.array(
                self.pose_img.resize(
                    size=(256, 512),
                    resample=Image.LANCZOS))[:, :, 2:].transpose(
                        2, 0, 1).astype(np.float32)
            self.pose_img = self.pose_img / 12. - 1

            self.pose_img = torch.from_numpy(self.pose_img).unsqueeze(1)

            self.sample_model.feed_pose_data(self.pose_img)

    def generate_parsing(self):
        self.ref_scene.reset_items()
        self.ref_scene.reset()

        shape_texts = self.message_box_1.text()

        shape_attributes = generate_shape_attributes(shape_texts)
        shape_attributes = torch.LongTensor(shape_attributes).unsqueeze(0)
        self.sample_model.feed_shape_attributes(shape_attributes)

        self.sample_model.generate_parsing_map()
        self.sample_model.generate_quantized_segm()

        self.colored_segm = self.sample_model.palette_result(
            self.sample_model.segm[0].cpu())

        self.mask_m = cv2.cvtColor(
            cv2.cvtColor(self.colored_segm, cv2.COLOR_RGB2BGR),
            cv2.COLOR_BGR2RGB)

        qim = QImage(self.colored_segm.data.tobytes(),
                     self.colored_segm.shape[1], self.colored_segm.shape[0],
                     QImage.Format_RGB888)

        image = QPixmap.fromImage(qim)

        image = image.scaled(self.graphicsView.size(), Qt.IgnoreAspectRatio)

        if len(self.ref_scene.items()) > 0:
            self.ref_scene.removeItem(self.ref_scene.items()[-1])
        self.ref_scene.addPixmap(image)

        self.result_scene.clear()

    def generate_human(self):
        for i in range(24):
            self.mask_m = self.make_mask(self.mask_m,
                                         self.ref_scene.mask_points[i],
                                         self.ref_scene.size_points[i],
                                         color_list[i])

        seg_map = np.full(self.mask_m.shape[:-1], -1)

        # convert rgb to num
        for index, color in enumerate(color_list):
            seg_map[np.sum(self.mask_m == color, axis=2) == 3] = index
        assert (seg_map != -1).all()

        self.sample_model.segm = torch.from_numpy(seg_map).unsqueeze(
            0).unsqueeze(0).to(self.sample_model.device)
        self.sample_model.generate_quantized_segm()

        texture_texts = self.message_box_2.text()
        texture_attributes = generate_texture_attributes(texture_texts)

        texture_attributes = torch.LongTensor(texture_attributes)

        self.sample_model.feed_texture_attributes(texture_attributes)

        self.sample_model.generate_texture_map()
        result = self.sample_model.sample_and_refine()
        result = result.permute(0, 2, 3, 1)
        result = result.detach().cpu().numpy()
        result = result * 255

        result = np.asarray(result[0, :, :, :], dtype=np.uint8)

        self.output_img = result

        qim = QImage(result.data.tobytes(), result.shape[1], result.shape[0],
                     QImage.Format_RGB888)
        image = QPixmap.fromImage(qim)

        image = image.scaled(self.graphicsView.size(), Qt.IgnoreAspectRatio)

        if len(self.result_scene.items()) > 0:
            self.result_scene.removeItem(self.result_scene.items()[-1])
        self.result_scene.addPixmap(image)

    def top_mode(self):
        self.ref_scene.mode = 1

    def skin_mode(self):
        self.ref_scene.mode = 15

    def outer_mode(self):
        self.ref_scene.mode = 2

    def face_mode(self):
        self.ref_scene.mode = 14

    def skirt_mode(self):
        self.ref_scene.mode = 3

    def hair_mode(self):
        self.ref_scene.mode = 13

    def dress_mode(self):
        self.ref_scene.mode = 4

    def headwear_mode(self):
        self.ref_scene.mode = 7

    def pants_mode(self):
        self.ref_scene.mode = 5

    def eyeglass_mode(self):
        self.ref_scene.mode = 8

    def rompers_mode(self):
        self.ref_scene.mode = 21

    def footwear_mode(self):
        self.ref_scene.mode = 11

    def leggings_mode(self):
        self.ref_scene.mode = 6

    def ring_mode(self):
        self.ref_scene.mode = 16

    def belt_mode(self):
        self.ref_scene.mode = 10

    def neckwear_mode(self):
        self.ref_scene.mode = 9

    def wrist_mode(self):
        self.ref_scene.mode = 17

    def socks_mode(self):
        self.ref_scene.mode = 18

    def tie_mode(self):
        self.ref_scene.mode = 23

    def earstuds_mode(self):
        self.ref_scene.mode = 22

    def necklace_mode(self):
        self.ref_scene.mode = 20

    def bag_mode(self):
        self.ref_scene.mode = 12

    def glove_mode(self):
        self.ref_scene.mode = 19

    def background_mode(self):
        self.ref_scene.mode = 0

    def make_mask(self, mask, pts, sizes, color):
        if len(pts) > 0:
            for idx, pt in enumerate(pts):
                cv2.line(mask, pt['prev'], pt['curr'], color, sizes[idx])
        return mask

    def save_img(self):
        # The original checked `if type(self.output_img):`, which is always
        # true (a type object is truthy); test for an actual image instead.
        if self.output_img is not None:
            fileName, _ = QFileDialog.getSaveFileName(self, "Save File",
                                                      QDir.currentPath())
            cv2.imwrite(fileName + '.png', self.output_img[:, :, ::-1])

    def undo(self):
        self.scene.undo()

    def clear(self):

        self.ref_scene.reset_items()
        self.ref_scene.reset()

        self.ref_scene.clear()

        self.result_scene.clear()


if __name__ == '__main__':

    app = QApplication(sys.argv)
    opt = './configs/sample_from_pose.yml'
    opt = parse(opt, is_train=False)
    opt = dict_to_nonedict(opt)
    ex = Ex(opt)
    sys.exit(app.exec_())
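
The RGB-mask-to-label-map step in generate_human() is the easiest part of this file to get wrong; here is a self-contained sketch of just that conversion, using a truncated color_list and synthetic data for brevity:

# Standalone sketch of the RGB-mask -> label-map conversion from
# generate_human() above (color_list truncated for the example).
import numpy as np

color_list = [(0, 0, 0), (255, 250, 250), (220, 220, 220)]

mask = np.zeros((4, 4, 3), dtype=np.uint8)
mask[2:, 2:] = (220, 220, 220)  # paint a patch with the class-2 color

seg_map = np.full(mask.shape[:-1], -1)
for index, color in enumerate(color_list):
    # A pixel belongs to a class when all three channels match its color.
    seg_map[np.sum(mask == color, axis=2) == 3] = index
assert (seg_map != -1).all()  # every pixel must map to a known class
print(seg_map)  # zeros everywhere except a 2x2 patch of 2s
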
spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/clip_swin.py
DELETED
@@ -1,289 +0,0 @@
from collections import OrderedDict
from typing import Tuple, Union
import logging
import os

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

from timm.models.layers import DropPath, trunc_normal_
from .backbone import Backbone
from .build import BACKBONE_REGISTRY
from .det_swin import SwinTransformer
from ..text_encoder import build_text_encoder
from ..text_encoder import build_tokenizer


class LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the square root).
        """
        super(LayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        pdtype = x.dtype
        x = x.float()
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x.to(pdtype) + self.bias


class QuickGELU(nn.Module):
    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)


class ResidualAttentionBlock(nn.Module):
    def __init__(self,
                 d_model: int,
                 n_head: int,
                 attn_mask: torch.Tensor = None,
                 drop_path: float = 0.0):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def attention(self, x: torch.Tensor):
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) \
            if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.drop_path(self.attention(self.ln_1(x)))
        x = x + self.drop_path(self.mlp(self.ln_2(x)))
        return x


class Transformer(nn.Module):
    def __init__(self,
                 context_length: int,
                 vocab_size: int,
                 width: int,
                 layers: int,
                 heads: int,
                 drop_path: float = 0.0):
        super().__init__()

        self.token_embedding = nn.Embedding(vocab_size, width)

        self.context_length = context_length
        self.positional_embedding = nn.Parameter(
            torch.empty(self.context_length, width)
        )

        self.width = width
        self.layers = layers
        attn_mask = self.build_attention_mask()
        dpr = [x.item() for x in torch.linspace(0, drop_path, layers)]  # stochastic depth decay rule
        self.resblocks = nn.Sequential(
            *[
                ResidualAttentionBlock(width, heads, attn_mask, dpr[i])
                for i in range(layers)
            ]
        )

        self.ln_final = LayerNorm(width)

        trunc_normal_(self.positional_embedding, std=.02)
        # nn.init.normal_(self.token_embedding, std=.02)
        trunc_normal_(self.token_embedding.weight, std=.02)
        self.apply(self._init_weights)

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    def _init_weights(self, m):
        if isinstance(m, (nn.Linear, nn.Conv2d)):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
            nn.init.constant_(m.bias, 0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {
            'positional_embedding',
            'token_embedding',
        }

    def forward(self, text: torch.Tensor):
        x = self.token_embedding(text)  # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.resblocks(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        x = self.ln_final(x)

        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)]

        return x


class CLIP(Backbone):
    def __init__(self, config: dict):
        super().__init__()
        spec_text = config['MODEL']['SPEC']['TEXT']
        assert spec_text['TOKENIZER'] == 'clip', 'Only support clip tokenizer'
        self.tokenizer_style = spec_text['TOKENIZER']
        self.tokenizer = build_tokenizer(spec_text)

        self.text_encoder = build_text_encoder(spec_text, self.tokenizer, True)

        embed_dim = config['MODEL']['SPEC']['EMBED_DIM']
        self.text_projection = nn.Parameter(
            torch.empty(spec_text['WIDTH'], embed_dim)
        )

        spec_vision = config['MODEL']['SPEC']['VISION']
        self.image_encoder = SwinTransformer(
            patch_size=spec_vision['PATCH_SIZE'],
            in_chans=spec_vision['IN_CHANS'],
            embed_dim=spec_vision['EMBED_DIM'],
            depths=spec_vision['DEPTHS'],
            num_heads=spec_vision['NUM_HEADS'],
            window_size=spec_vision['WINDOW_SIZE'],
            mlp_ratio=spec_vision['MLP_RATIO'],
            qkv_bias=spec_vision['QKV_BIAS'],
            qk_scale=spec_vision.get('QK_SCALE', None),
            drop_rate=spec_vision['DROP_RATE'],
            attn_drop_rate=spec_vision['ATTN_DROP_RATE'],
            drop_path_rate=spec_vision['DROP_PATH_RATE'],
            ape=spec_vision['APE'],
            patch_norm=spec_vision['PATCH_NORM'],
            out_indices=(0, 1, 2, 3),
            frozen_stages=-1,
            use_checkpoint=False,
        )

        width = spec_vision['EMBED_DIM'] * 2 ** (len(spec_vision['DEPTHS']) - 1)
        self.image_projection = nn.Parameter(
            torch.empty(width, embed_dim)
        )
        # self.logit_scale = nn.Parameter(torch.FloatTensor([np.log(1 / 0.07)]))
        self.logit_scale = nn.Parameter(torch.ones([]))

        trunc_normal_(self.text_projection, std=.02)
        trunc_normal_(self.image_projection, std=.02)

    def init_weights(self, pretrained='', pretrained_layers=[], verbose=True):
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained, map_location='cpu')
            # was `logger.info(...)`, but no module-level logger is defined;
            # use the logging module directly, as the call below already does
            logging.info(f'=> loading pretrained model {pretrained}')
            model_dict = self.state_dict()
            pretrained_dict = {
                k: v for k, v in pretrained_dict.items()
                if k in model_dict.keys()
            }
            need_init_state_dict = {}
            for k, v in pretrained_dict.items():
                need_init = (
                    k.split('.')[0] in pretrained_layers
                    or pretrained_layers[0] == '*'  # `is '*'` compared identity, not value
                )
                if need_init:
                    if verbose:
                        logging.info(f'=> init {k} from {pretrained}')
                    need_init_state_dict[k] = v
            self.load_state_dict(need_init_state_dict, strict=False)

    @torch.jit.ignore
    def no_weight_decay(self):
        no_weight_decay = {'logit_scale'}
        for k in self.text_encoder.no_weight_decay():
            no_weight_decay.add('text.' + k)

        for k in self.image_encoder.no_weight_decay():
            no_weight_decay.add('visual.' + k)

        return no_weight_decay

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    @property
    def dtype(self):
        return self.image_encoder.conv1.weight.dtype

    def encode_image(self, image, norm=True):
        x = self.image_encoder(image)
        return x

    def encode_text(self, text, norm=True):
        assert isinstance(text, str), "only support single query"
        tokens = self.tokenizer(
            text, padding='max_length', truncation=True, max_length=77, return_tensors='pt'
        )
        tokens = {key: (val.cuda() if next(self.parameters()).is_cuda else val) for key, val in tokens.items()}
        x = self.text_encoder(**tokens)
        x = x['last_hidden_state']
        x = x[torch.arange(x.size(0)), tokens['input_ids'].argmax(dim=-1)]

        x = x @ self.text_projection
        if norm:
            x = x / x.norm(dim=-1, keepdim=True)
        return x

    def forward(self, image):
        features_image = self.image_encoder(image)
        return features_image


@BACKBONE_REGISTRY.register()
def build_clip_swin_backbone(cfg, input_shape):
    """
    Create a CLIP Swin instance from config.

    Returns:
        SwinTransformer: a :class:`SwinTransformer` instance.
    """
    spec_vision = cfg.MODEL.CLIP.VISION
    return SwinTransformer(
        patch_size=spec_vision['PATCH_SIZE'],
        in_chans=spec_vision['IN_CHANS'],
        embed_dim=spec_vision['EMBED_DIM'],
        depths=spec_vision['DEPTHS'],
        num_heads=spec_vision['NUM_HEADS'],
        window_size=spec_vision['WINDOW_SIZE'],
        mlp_ratio=spec_vision['MLP_RATIO'],
        qkv_bias=spec_vision['QKV_BIAS'],
        qk_scale=spec_vision.get('QK_SCALE', None),
        drop_rate=spec_vision['DROP_RATE'],
        attn_drop_rate=spec_vision['ATTN_DROP_RATE'],
        drop_path_rate=spec_vision['DROP_PATH_RATE'],
        ape=spec_vision['APE'],
        patch_norm=spec_vision['PATCH_NORM'],
        out_indices=(0, 1, 2, 3),
        frozen_stages=-1,
        use_checkpoint=False,
    )


@BACKBONE_REGISTRY.register()
def build_clip_swin(cfg, input_shape):
    """
    Create a CLIP Swin instance from config.

    Returns:
        SwinTransformer: a :class:`SwinTransformer` instance.
    """
    return CLIP(cfg)
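
The causal mask built in Transformer.build_attention_mask() is the piece that makes this text encoder autoregressive; a standalone sketch of just that construction:

# Standalone sketch of build_attention_mask() above.
import torch

context_length = 5
mask = torch.empty(context_length, context_length)
mask.fill_(float("-inf"))
mask.triu_(1)  # -inf strictly above the diagonal, zero on and below it
# PyTorch adds this mask to the attention logits, so row i can only
# attend to positions 0..i; the -inf entries vanish in the softmax.
print(mask)
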
spaces/ChevyWithAI/rvc-aicover/infer_pack/transforms.py
DELETED
@@ -1,209 +0,0 @@
import torch
from torch.nn import functional as F

import numpy as np


DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3


def piecewise_rational_quadratic_transform(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails=None,
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    if tails is None:
        spline_fn = rational_quadratic_spline
        spline_kwargs = {}
    else:
        spline_fn = unconstrained_rational_quadratic_spline
        spline_kwargs = {"tails": tails, "tail_bound": tail_bound}

    outputs, logabsdet = spline_fn(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **spline_kwargs
    )
    return outputs, logabsdet


def searchsorted(bin_locations, inputs, eps=1e-6):
    bin_locations[..., -1] += eps
    return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1


def unconstrained_rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails="linear",
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == "linear":
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError("{} tails are not implemented.".format(tails))

    (
        outputs[inside_interval_mask],
        logabsdet[inside_interval_mask],
    ) = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound,
        right=tail_bound,
        bottom=-tail_bound,
        top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
    )

    return outputs, logabsdet


def rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    left=0.0,
    right=1.0,
    bottom=0.0,
    top=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError("Input to a transform is not within its domain")

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError("Minimal bin width too large for the number of bins")
    if min_bin_height * num_bins > 1.0:
        raise ValueError("Minimal bin height too large for the number of bins")

    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        a = (inputs - input_cumheights) * (
            input_derivatives + input_derivatives_plus_one - 2 * input_delta
        ) + input_heights * (input_delta - input_derivatives)
        b = input_heights * input_derivatives - (inputs - input_cumheights) * (
            input_derivatives + input_derivatives_plus_one - 2 * input_delta
        )
        c = -input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + (
            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
            * theta_one_minus_theta
        )
        derivative_numerator = input_delta.pow(2) * (
            input_derivatives_plus_one * root.pow(2)
            + 2 * input_delta * theta_one_minus_theta
            + input_derivatives * (1 - root).pow(2)
        )
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, -logabsdet
    else:
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (
            input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
        )
        denominator = input_delta + (
            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
            * theta_one_minus_theta
        )
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (
            input_derivatives_plus_one * theta.pow(2)
            + 2 * input_delta * theta_one_minus_theta
            + input_derivatives * (1 - theta).pow(2)
        )
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
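
A minimal smoke test for the spline above, assuming the module is importable as-is: a forward pass followed by an inverse pass should round-trip, and their log-determinants should cancel. Note the derivative tensor has num_bins - 1 columns because the "linear" tails path pads one knot derivative at each end.

# Smoke test for piecewise_rational_quadratic_transform (linear tails).
import torch

batch, num_bins = 4, 10
inputs = torch.rand(batch) * 2 - 1          # values inside (-1, 1)
widths = torch.randn(batch, num_bins)
heights = torch.randn(batch, num_bins)
derivs = torch.randn(batch, num_bins - 1)   # interior knot derivatives only

y, logdet = piecewise_rational_quadratic_transform(
    inputs, widths, heights, derivs, tails="linear", tail_bound=1.0)
x, inv_logdet = piecewise_rational_quadratic_transform(
    y, widths, heights, derivs, inverse=True, tails="linear", tail_bound=1.0)

assert torch.allclose(x, inputs, atol=1e-4)  # forward/inverse round-trip
assert torch.allclose(logdet + inv_logdet,
                      torch.zeros_like(logdet), atol=1e-4)  # dets cancel
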
spaces/Chitranshu/Dashboard-Uber/README.md
DELETED
@@ -1,10 +0,0 @@
---
title: Uber-Dashboard
emoji: 🌍
colorFrom: gray
colorTo: gray
sdk: docker
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CikeyQI/meme-api/meme_generator/memes/my_wife/__init__.py
DELETED
@@ -1,53 +0,0 @@
from pathlib import Path
from typing import List

from pil_utils import BuildImage

from meme_generator import add_meme

img_dir = Path(__file__).parent / "images"


def my_wife(images: List[BuildImage], texts, args):
    img = images[0].convert("RGBA").resize_width(400)
    img_w, img_h = img.size
    frame = BuildImage.new("RGBA", (650, img_h + 500), "white")
    frame.paste(img, (int(325 - img_w / 2), 105), alpha=True)

    text = "如果你的老婆长这样"  # "If your wife looks like this"
    frame.draw_text(
        (27, 12, 27 + 596, 12 + 79),
        text,
        max_fontsize=70,
        min_fontsize=30,
        allow_wrap=True,
        lines_align="center",
        weight="bold",
    )
    text = "那么这就不是你的老婆\n这是我的老婆"  # "Then this is not your wife\nThis is my wife"
    frame.draw_text(
        (27, img_h + 120, 27 + 593, img_h + 120 + 135),
        text,
        max_fontsize=70,
        min_fontsize=30,
        allow_wrap=True,
        weight="bold",
    )
    text = "滚去找你\n自己的老婆去"  # "Go find your own wife"
    frame.draw_text(
        (27, img_h + 295, 27 + 374, img_h + 295 + 135),
        text,
        max_fontsize=70,
        min_fontsize=30,
        allow_wrap=True,
        lines_align="center",
        weight="bold",
    )

    img_point = BuildImage.open(img_dir / "1.png").resize_width(200)
    frame.paste(img_point, (421, img_h + 270))

    return frame.save_jpg()


# keywords: "my wife", "this is my wife"
add_meme("my_wife", my_wife, min_images=1, max_images=1, keywords=["我老婆", "这是我老婆"])
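
A hypothetical driver for this meme, assuming save_jpg() yields an in-memory JPEG buffer as in pil_utils; the file names below are illustrative:

# Hypothetical usage sketch (file names are illustrative).
from pil_utils import BuildImage

avatar = BuildImage.open("avatar.jpg")
output = my_wife([avatar], texts=[], args=None)  # JPEG buffer from save_jpg()
with open("my_wife_meme.jpg", "wb") as f:
    f.write(output.getvalue())
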
spaces/CofAI/chat.b4/g4f/Provider/Providers/GetGpt.py
DELETED
@@ -1,57 +0,0 @@
import os
import json
import uuid
import requests
from Crypto.Cipher import AES
from ...typing import sha256, Dict, get_type_hints

url = 'https://chat.getgpt.world/'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    def encrypt(e):
        t = os.urandom(8).hex().encode('utf-8')
        n = os.urandom(8).hex().encode('utf-8')
        r = e.encode('utf-8')
        cipher = AES.new(t, AES.MODE_CBC, n)
        ciphertext = cipher.encrypt(pad_data(r))
        return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')

    def pad_data(data: bytes) -> bytes:
        block_size = AES.block_size
        padding_size = block_size - len(data) % block_size
        padding = bytes([padding_size] * padding_size)
        return data + padding

    headers = {
        'Content-Type': 'application/json',
        'Referer': 'https://chat.getgpt.world/',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
    }

    data = json.dumps({
        'messages': messages,
        'frequency_penalty': kwargs.get('frequency_penalty', 0),
        'max_tokens': kwargs.get('max_tokens', 4000),
        'model': 'gpt-3.5-turbo',
        'presence_penalty': kwargs.get('presence_penalty', 0),
        'temperature': kwargs.get('temperature', 1),
        'top_p': kwargs.get('top_p', 1),
        'stream': True,
        'uuid': str(uuid.uuid4())
    })

    res = requests.post('https://chat.getgpt.world/api/chat/stream',
                        headers=headers, json={'signature': encrypt(data)}, stream=True)

    for line in res.iter_lines():
        if b'content' in line:
            line_json = json.loads(line.decode('utf-8').split('data: ')[1])
            yield (line_json['choices'][0]['delta']['content'])


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])