Commit 7ac1142
Parent(s): 7cbde72

Update parquet files (step 105 of 249)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
- spaces/0xeureka/ehartford-WizardLM-13B-Uncensored/app.py +0 -3
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/A Plague Tale Innocence Update V1 04-CODEX.md +0 -23
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Best Alternatives to Cracked Netflix App Download for Android in 2023.md +0 -26
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Flash Player Activex Is Not Installed.md +0 -49
- spaces/1gistliPinn/ChatGPT4/Examples/120718ALBUM SEKAI NO OWARI ENTERTAINMENT 320Krar __EXCLUSIVE__.md +0 -10
- spaces/1gistliPinn/ChatGPT4/Examples/Autodata 3.38 2012 Eng.rar.md +0 -18
- spaces/1gistliPinn/ChatGPT4/Examples/Breaking Bad Season 2 __EXCLUSIVE__ Download 1080p.md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Captain Sim Weather Radar Crack ((FREE)).md +0 -78
- spaces/1gistliPinn/ChatGPT4/Examples/Fifa 2008 Crack Indir Oyuncehennemi Tek Romagna U8110 Roswel.md +0 -6
- spaces/1line/AutoGPT/autogpt/commands/web_requests.py +0 -190
- spaces/1line/AutoGPT/autogpt/speech/__init__.py +0 -4
- spaces/1phancelerku/anime-remove-background/3D Paint Brush Free Download Tips and Tricks for 3D Artists.md +0 -164
- spaces/1phancelerku/anime-remove-background/And Blue Download The Ultimate Guide to Playing Android Games on PC.md +0 -126
- spaces/1phancelerku/anime-remove-background/Android 11 Emojis Download How to Get the Latest Unicode 13 Emojis on Any Device.md +0 -158
- spaces/2023Liu2023/bingo/src/components/ui/codeblock.tsx +0 -142
- spaces/2023Liu2023/bingo/src/pages/api/create.ts +0 -31
- spaces/AIConsultant/MusicGen/audiocraft/adversarial/discriminators/__init__.py +0 -10
- spaces/AIDHD/audio-video-transcriber/app.py +0 -388
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/latent_diffusion/util.py +0 -295
- spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/ps.py +0 -194
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/utils.py +0 -369
- spaces/AIatUIUC/CodeLATS/executors/__init__.py +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/input/Input.js +0 -64
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/methods/listpanel/CreateListPanel.js +0 -64
- spaces/Alpaca233/SadTalker/src/face3d/options/base_options.py +0 -169
- spaces/Altinas/vits-uma-genshin-honkais/README.md +0 -11
- spaces/Amrrs/DragGan-Inversion/PTI/utils/__init__.py +0 -0
- spaces/Amrrs/DragGan-Inversion/viz/renderer.py +0 -442
- spaces/Ananthap4/itineraryGenerator/app.py +0 -28
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/diffusion_pipeline.md +0 -36
- spaces/Andy1621/uniformer_image_detection/configs/_base_/schedules/schedule_2x.py +0 -11
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/__init__.py +0 -69
- spaces/Anonymous-sub/Rerender/ControlNet/tutorial_train_sd21.py +0 -35
- spaces/Apex-X/ROOPOK/roop/capturer.py +0 -22
- spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/__init__.py +0 -0
- spaces/Arnx/MusicGenXvAKN/audiocraft/__init__.py +0 -10
- spaces/Arnx/MusicGenXvAKN/audiocraft/modules/rope.py +0 -124
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/easy_install.py +0 -2312
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py +0 -9
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/dlafpn.py +0 -493
- spaces/BaddaAshok0265/AshokGenAI/README.md +0 -12
- spaces/Bart92/RVC_HF/lib/infer_pack/modules/F0Predictor/F0Predictor.py +0 -16
- spaces/Bingyunhu/hoping/README.md +0 -12
- spaces/CVPR/LIVE/atomic.h +0 -139
- spaces/CVPR/LIVE/diffvg.h +0 -156
- spaces/CVPR/LIVE/thrust/thrust/device_malloc.h +0 -103
- spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/select_system.h +0 -125
- spaces/CVPR/regionclip-demo/detectron2/data/datasets/coco_panoptic.py +0 -228
- spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/twitter.py +0 -26
- spaces/CikeyQI/Yunzai/Yunzai/lib/config/check.js +0 -32
spaces/0xeureka/ehartford-WizardLM-13B-Uncensored/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/ehartford/WizardLM-13B-Uncensored").launch()
spaces/1acneusushi/gradio-2dmoleculeeditor/data/A Plague Tale Innocence Update V1 04-CODEX.md
DELETED
@@ -1,23 +0,0 @@
-
-<h1>A Plague Tale: Innocence Update v1.04-CODEX: What's New and How to Install</h1>
-<p>A Plague Tale: Innocence is a critically acclaimed action-adventure game that follows the grim tale of young Amicia and her little brother Hugo, in a heartrending journey through the darkest hours of history. Hunted by Inquisition soldiers and surrounded by unstoppable swarms of rats, Amicia and Hugo will come to know and trust each other. As they struggle to survive against overwhelming odds, they will fight to find purpose in this brutal, unforgiving world.</p>
-<h2>A Plague Tale Innocence Update v1 04-CODEX</h2><br /><p><b><b>Download</b> ––– <a href="https://byltly.com/2uKxIh">https://byltly.com/2uKxIh</a></b></p><br /><br />
-<p>The game was released on May 14, 2019 by Focus Home Interactive and developed by Asobo Studio. It received positive reviews from critics and players alike, praising its story, characters, graphics, gameplay, and atmosphere. The game has also won several awards and nominations, such as the Best Narrative award at The Game Awards 2019.</p>
-<p>On June 19, 2019, CODEX released an update for the game, v1.04, that fixes some bugs and improves performance. The update also adds support for more languages, such as Arabic, Polish, Czech, Korean, and Simplified Chinese. The update is about 40 MB in size and requires the base game and the previous update v1.03 to be installed.</p>
-<p>To install the update, follow these steps:</p>
-<ol>
-<li>Download the update from one of the links provided in the web search results[^1^] [^2^] [^3^].</li>
-<li>Extract the contents of the zip file to a folder on your PC.</li>
-<li>Run setup.exe and install the update to the same folder where you installed the base game.</li>
-<li>Copy the crack files from the CODEX folder to the game folder, replacing the existing ones.</li>
-<li>Block the game's exe in your firewall to prevent it from going online.</li>
-<li>Launch the game and enjoy!</li>
-</ol>
-<p>Note: If you install games to your system drive, it may be necessary to run this game with admin privileges instead.</p>
-<p></p><p>A Plague Tale: Innocence is not only a captivating story-driven game, but also a thrilling gameplay experience that combines stealth, action, and puzzle-solving. The game puts the player in the shoes of Amicia de Rune, a young noble girl who must protect her sick brother Hugo from the horrors of the plague and the Inquisition in 14th century France.</p>
-<p>The game features a dynamic gameplay system that revolves around the use of Amicia's sling and various alchemical substances that she can craft on the fly. The sling can be used to distract, stun, or kill enemies, as well as to manipulate the environment and create new paths. The alchemical substances can have different effects on the enemies and the rats, such as attracting them, repelling them, or setting them on fire.</p>
-<p>The game also challenges the player to deal with the swarms of rats that infest the land and devour anything in their way. The rats are afraid of light sources, such as torches and braziers, which can be used to create safe zones or to trap enemies. However, some enemies can also use light sources to their advantage, forcing the player to find alternative ways to deal with them.</p>
-<p>The game is not a linear experience, but rather offers some degree of exploration and choice. The player can find collectibles and secrets that reveal more about the world and the characters, as well as upgrade Amicia's equipment and skills. The player can also choose how to approach each situation, whether by using stealth or violence, or by finding creative solutions.</p>
-<p>The game also features a strong emotional bond between Amicia and Hugo, who depend on each other for survival. The player can interact with Hugo and other characters they meet along their journey, and witness how their relationship evolves over time. The game also has several moments of high tension and drama that will keep the player on the edge of their seat.</p> 81aa517590<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Best Alternatives to Cracked Netflix App Download for Android in 2023.md
DELETED
@@ -1,26 +0,0 @@
-<br />
-<h1>How to Get Cracked Netflix App Download for Android in 2023</h1>
-<p>If you are looking for a way to watch Netflix for free on your Android device, you might have heard of cracked Netflix app download for Android. This is a modified version of the official Netflix app that bypasses the subscription and login requirements and lets you access the entire Netflix library without paying anything.</p>
-<p>However, before you rush to download and install the cracked Netflix app on your Android device, you should be aware of the risks and drawbacks of doing so. In this article, we will explain what cracked Netflix app download for Android is, how it works, and why you should avoid it.</p>
-<h2>cracked netflix app download for android</h2><br /><p><b><b>DOWNLOAD</b> ✏ ✏ ✏ <a href="https://byltly.com/2uKvv2">https://byltly.com/2uKvv2</a></b></p><br /><br />
-<h2>What is Cracked Netflix App Download for Android?</h2>
-<p>Cracked Netflix app download for Android is a term used to describe a hacked or modified version of the official Netflix app that allows users to watch Netflix content for free. The cracked app usually comes from third-party sources that are not verified or trusted by Google Play Store or Netflix.</p>
-<p>The cracked app works by spoofing the Netflix servers and tricking them into thinking that the user has a valid subscription and login credentials. This way, the user can access any Netflix content without paying anything or logging in with their own account.</p>
-<h2>How Does Cracked Netflix App Download for Android Work?</h2>
-<p>The cracked Netflix app download for Android works by using a technique called reverse engineering. This means that the hackers or developers of the cracked app analyze the code and structure of the official Netflix app and modify it to remove or bypass the security and authentication features.</p>
-<p>The modified app then connects to a proxy server that acts as a middleman between the user and the Netflix servers. The proxy server sends fake requests and responses to the Netflix servers and makes them believe that the user has a valid subscription and login credentials. This way, the user can watch any Netflix content without paying anything or logging in with their own account.</p>
-<h2>Why Should You Avoid Cracked Netflix App Download for Android?</h2>
-<p>While cracked Netflix app download for Android might sound tempting, it is not worth the risk. Here are some of the reasons why you should avoid using cracked Netflix app on your Android device:</p>
-<ul>
-<li><b>It is illegal.</b> Using cracked Netflix app download for Android is a violation of Netflix's terms of service and copyright laws. You are essentially stealing content from Netflix and depriving them of their revenue. If you are caught using cracked Netflix app, you could face legal consequences such as fines or lawsuits.</li>
-<li><b>It is unsafe.</b> Using cracked Netflix app download for Android exposes you to various security and privacy risks. The cracked app could contain malware, spyware, adware, or viruses that could harm your device or steal your personal information. The proxy server that connects you to the Netflix servers could also monitor your online activity and collect your data. You could also be targeted by hackers or cybercriminals who could exploit your device or account.</li>
-<li><b>It is unreliable.</b> Using cracked Netflix app download for Android does not guarantee a smooth or quality streaming experience. The cracked app could have bugs, glitches, errors, or compatibility issues that could affect its performance or functionality. The proxy server that connects you to the Netflix servers could also be slow, unstable, or blocked by Netflix at any time. You could experience buffering, lagging, freezing, crashing, or error messages while watching Netflix content.</li>
-<li><b>It is unethical.</b> Using cracked Netflix app download for Android is unfair to the creators and producers of the Netflix content. They work hard to create and deliver high-quality content for their audience and deserve to be compensated for their efforts. By using cracked Netflix app, you are disrespecting their work and undermining their value.</li>
-</ul>
-<h2>What Are Some Alternatives to Cracked Netflix App Download for Android?</h2>
-<p>If you want to watch Netflix content on your Android device without paying anything, there are some legal and safe alternatives to cracked Netflix app download for Android. Here are some of them:</p>
-<ul>
-<li><b>Free trial.</b> If you are a new user, you can sign up for a free trial of Netflix and enjoy unlimited access to its content for a limited period of time (usually 30 days). You can cancel your subscription anytime before the trial ends and avoid being charged.</li>
-<li><b>Shared account.</b> If you have a friend or family member who has a paid subscription of Netflix, you can ask</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Flash Player Activex Is Not Installed.md
DELETED
@@ -1,49 +0,0 @@
-<br />
-<h1>How to Fix Flash Player Activex Is Not Installed Error on Windows 10</h1>
-<p>If you are trying to play a Flash-based game or video on your Windows 10 computer, you might encounter an error message that says "Flash Player Activex Is Not Installed". This error means that your browser does not have the necessary plugin to run Flash content. In this article, we will show you how to fix this error and enable Flash Player on your browser.</p>
-<h2>What is Flash Player Activex?</h2>
-<p>Flash Player Activex is a version of Adobe Flash Player that is designed for Internet Explorer and other browsers that use the ActiveX technology. Flash Player Activex allows you to view interactive web content such as games, animations, videos, and advertisements that are created with Adobe Flash.</p>
-<h2>Flash Player Activex Is Not Installed</h2><br /><p><b><b>Download Zip</b> ✏ <a href="https://byltly.com/2uKA6N">https://byltly.com/2uKA6N</a></b></p><br /><br />
-<h2>Why is Flash Player Activex Not Installed?</h2>
-<p>There are several possible reasons why Flash Player Activex is not installed on your Windows 10 computer. Some of them are:</p>
-<ul>
-<li>Your browser does not support Flash Player Activex. For example, Microsoft Edge and Google Chrome have discontinued the support for Flash Player since December 2020.</li>
-<li>Your browser has disabled Flash Player Activex by default. For example, Internet Explorer and Firefox require you to enable Flash Player manually for each site that uses it.</li>
-<li>Your browser has blocked Flash Player Activex due to security or performance issues. For example, some browsers may block Flash content that is outdated, insecure, or consumes too much CPU or memory.</li>
-<li>Your computer does not have the latest version of Flash Player Activex installed. For example, some websites may require you to update your Flash Player to the newest version to run their content.</li>
-</ul>
-<h2>How to Fix Flash Player Activex Is Not Installed Error?</h2>
-<p>To fix the Flash Player Activex is not installed error, you need to do the following steps:</p>
-<ol>
-<li>Check if your browser supports Flash Player Activex. If not, switch to a browser that does, such as Internet Explorer or Firefox.</li>
-<li>Check if your browser has enabled Flash Player Activex. If not, enable it manually for each site that uses it. You can usually find the option to enable Flash in the address bar or the settings menu of your browser.</li>
-<li>Check if your browser has blocked Flash Player Activex. If so, unblock it by clicking on the icon or notification that appears in the address bar or the settings menu of your browser.</li>
-<li>Check if your computer has the latest version of Flash Player Activex installed. If not, download and install it from the official Adobe website: <a href="https://get.adobe.com/flashplayer/">https://get.adobe.com/flashplayer/</a>.</li>
-</ol>
-<p>After following these steps, you should be able to fix the Flash Player Activex is not installed error and enjoy the Flash content on your Windows 10 computer.</p>
-
-<h2>What are the Benefits of Flash Player Activex?</h2>
-<p>Flash Player Activex has some benefits that make it worth using on your Windows 10 computer. Some of them are:</p>
-<ul>
-<li>Flash Player Activex can run rich and interactive web content that other technologies cannot. For example, Flash can create 3D graphics, animations, games, and videos that are engaging and immersive.</li>
-<li>Flash Player Activex can run on multiple platforms and browsers without requiring additional software or plugins. For example, Flash can run on Windows, Mac, Linux, Android, iOS, and other operating systems and browsers that support ActiveX.</li>
-<li>Flash Player Activex can run offline and online without requiring an internet connection. For example, Flash can run on your computer or mobile device without needing to download or stream the content from the web.</li>
-</ul>
-<h2>What are the Risks of Flash Player Activex?</h2>
-<p>Flash Player Activex also has some risks that you need to be aware of before using it on your Windows 10 computer. Some of them are:</p>
-<ul>
-<li>Flash Player Activex can pose security and privacy threats to your computer and data. For example, Flash can contain malware, viruses, spyware, or other malicious code that can harm your computer or steal your personal information.</li>
-<li>Flash Player Activex can cause performance and compatibility issues on your computer and browser. For example, Flash can consume a lot of CPU and memory resources, slow down your computer or browser, crash or freeze your browser, or interfere with other plugins or extensions.</li>
-<li>Flash Player Activex can become obsolete and unsupported in the near future. For example, Adobe has announced that it will stop updating and distributing Flash Player by the end of 2020, and many browsers and websites have already stopped supporting Flash content.</li>
-</ul>
-<h2>How to Manage Flash Player Activex on Your Windows 10 Computer?</h2>
-<p>To manage Flash Player Activex on your Windows 10 computer, you need to do the following steps:</p>
-<p></p>
-<ol>
-<li>Check if you really need Flash Player Activex on your computer. If not, uninstall it from the Control Panel or the Settings app of your Windows 10 computer.</li>
-<li>Check if you can use alternative technologies to Flash Player Activex. If so, switch to HTML5, WebGL, JavaScript, or other web standards that can run similar content without requiring Flash Player.</li>
-<li>Check if you can limit the use of Flash Player Activex on your browser. If so, disable it by default and only enable it for trusted sites that require it. You can also use browser extensions or add-ons that can block or control Flash content on your browser.</li>
-</ol>
-<p>By managing Flash Player Activex on your Windows 10 computer, you can reduce the risks and improve the benefits of using it.</p> 7b8c122e87<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/120718ALBUM SEKAI NO OWARI ENTERTAINMENT 320Krar __EXCLUSIVE__.md
DELETED
@@ -1,10 +0,0 @@
-<h2>120718ALBUM SEKAI NO OWARI ENTERTAINMENT 320Krar</h2><br /><p><b><b>DOWNLOAD</b> ->->->-> <a href="https://imgfil.com/2uxX98">https://imgfil.com/2uxX98</a></b></p><br /><br />
-<br />
-June 6, 2021 - The company has not released a price list after the customs increase. /stories/3380015-120718-album-sekai-no-owari-entertainment-320k-rar. html
-June 1st.
-On Monday, June 6, the company did not release a price list after the increase in customs, but I believe that the prices are about the same as last week.
-Last week, the company raised customs from 300 yen.
-In fact, the prices have remained the same, but due to the fact that the company's price list does not indicate the customs fee, it is not clear how the prices have changed. 8a78ff9644<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Autodata 3.38 2012 Eng.rar.md
DELETED
@@ -1,18 +0,0 @@
-<h2>Autodata 3.38 2012 Eng.rar</h2><br /><p><b><b>Download File</b> ○ <a href="https://imgfil.com/2uy0pa">https://imgfil.com/2uy0pa</a></b></p><br /><br />
-
-Autodata 3.45 Full Version Final DVD English without keygen incl ... ESItronic Patch Keygen 1Q.2013.rar 1 autodata 2012 3.41 deutsch-adds. ...esi. exe ... cracked 2013 autodata 3.45 keygen autodata 3.45 key ...
-1 Jul 2016 ...
-Published on Jul 1, 2016.
-How to download and activate the program?
-1 ... ESItronic Patch Keygen 1Q.2013 Autodata 3.45 keygen ...
-2013 autodata 3.45 full cracked keygen keygen crack Autodata 3.45 keygen
-Dec 3, 2014 ...
-Free download Autodata 3.45.
-Download ...
-Free download Autodata 3.40.
-Autodata 3.46 2013 [2012, RUS]-DVD.
-[Cracked] Autodata 3.45 Full [2013, PC/Eng].
-Download ... 8a78ff9644<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Breaking Bad Season 2 __EXCLUSIVE__ Download 1080p.md
DELETED
@@ -1,9 +0,0 @@
-
-<p>the trend at the time was comedy. there were many great scripts being rejected by cable and broadcast television. some examples include a pilot for seth macfarlane's <em>family guy</em> and the amazing ray bradbury pilot <em>rock 'n' roll seduction</em>.</p>
-<p>so some of these rejected scripts became the basis for the great tv shows of the last 10 years, including <em>community, mad men</em>, and <em>breaking bad</em>. while it's impossible to know which one of these scripts will succeed, the library certainly doesn't lack for future stars of quality television.</p>
-<h2>Breaking Bad Season 2 Download 1080p</h2><br /><p><b><b>Download File</b> … <a href="https://imgfil.com/2uy17H">https://imgfil.com/2uy17H</a></b></p><br /><br />
-<p><em>stillness in the wind is a film blog from los angeles for enthusiasts that are interested in finding good movies that get less attention at the box office. you can also find us at<strong> twitter</strong>, <strong>facebook</strong> and <strong>instagram</strong>.</em></p>
-<p>tmp24 is the second biggest telecasting channel of turkish. initially it broadcasted cartoons, after that it shifted its programming policy towards the turkish-pop culture section. programmes are often shifted without any notice. from 2010 till now it has been providing all the pirated versions of live action movies, series and tv shows. </p>
-<p>as people watch more and more television, they tend to have to select online tv shows. many of the highly rated tv series are not available on television anymore but the most important thing is, that once it is online they are free to watch wherever they want. many websites have popped up where people can watch their favorite tv shows online. unfortunately, most of these sites take credit cards rather than paypal. on tvtorrents.com, however, you don't need to worry about an account. tvtorrents.com is one of the most trusted movie download websites. the website serves hundreds of tv series and movies daily, and it also allows users to download from free torrent at anytime.</p> 899543212b<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Captain Sim Weather Radar Crack ((FREE)).md
DELETED
@@ -1,78 +0,0 @@
-
-<h1>Captain Sim Weather Radar Crack: How to Download and Use This Amazing Plugin for FSX</h1>
-
-<p>If you are looking for a way to enhance your flight simulation experience with realistic weather radar, you might want to check out Captain Sim Weather Radar Crack. This is a plugin that works with any FSX aircraft and allows you to see the weather conditions around you in 3D. In this article, we will show you how to download and use this plugin, as well as some of the features and benefits it offers.</p>
-<h2>Captain Sim Weather Radar Crack</h2><br /><p><b><b>Download</b> ✑ <a href="https://imgfil.com/2uy1vT">https://imgfil.com/2uy1vT</a></b></p><br /><br />
-
-<h2>What is Captain Sim Weather Radar Crack?</h2>
-
-<p>Captain Sim Weather Radar Crack is a plugin developed by Captain Sim, a company that specializes in flight simulation software. It is an expansion for their Weather Radar product, which is compatible with their own aircraft models.</p>
-
-<p>Captain Sim Weather Radar Crack allows you to use the weather radar with any FSX aircraft, not just Captain Sim's ones. It comes with a WxR Editor tool that lets you install and remove the plugin with one click. You can also adjust some settings such as the range, tilt, gain, and mode of the radar.</p>
-
-<p>Captain Sim Weather Radar Crack has outstanding visual quality and realism. It shows you the precipitation, clouds, turbulence, and winds aloft in 3D. It is also compatible with Ultimate Weather FX and REX weather add-ons, which enhance the weather effects in FSX.</p>
-
-<p>Captain Sim Weather Radar Crack is ideal for simulating various weather scenarios such as:</p>
-<p></p>
-
-<ul>
-<li>Storms and thunderstorms</li>
-<li>Rain and snow</li>
-<li>Fog and low visibility</li>
-<li>Wind shear and microbursts</li>
-<li>And much more!</li>
-</ul>
-
-<h2>How to Download Captain Sim Weather Radar Crack?</h2>
-
-<p>Unfortunately, Captain Sim Weather Radar Crack is not available for purchase from Captain Sim's website. They only sell the original Weather Radar product, which works only with their own aircraft models.</p>
-
-<p>However, you can still find Captain Sim Weather Radar Crack on some torrent sites. These are websites that allow users to share files through peer-to-peer networks. You will need a torrent client software to download the files from these sites.</p>
-
-<p>Before you download Captain Sim Weather Radar Crack from a torrent site, you should be aware of the following risks:</p>
-
-<ul>
-<li>The files may contain viruses or malware that can harm your computer or steal your personal information.</li>
-<li>The files may be incomplete or corrupted, which can cause errors or crashes in your software.</li>
-<li>The files may be illegal or infringe on the intellectual property rights of Captain Sim or Microsoft.</li>
-<li>You may face legal consequences or penalties for downloading or using pirated software.</li>
-</ul>
-
-<p>We do not recommend or endorse downloading Captain Sim Weather Radar Crack from torrent sites. We advise you to use legal and safe alternatives instead.</p>
-
-<h2>How to Use Captain Sim Weather Radar Crack?</h2>
-
-<p>If you have downloaded Captain Sim Weather Radar Crack from a torrent site and installed it on your computer, you can use it as follows:</p>
-
-<ol>
-<li>Select an aircraft in FSX that you want to use the weather radar with.</li>
-<li>Run the WxR Editor tool that comes with the plugin.</li>
-<li>Select the aircraft from the list and click Install.</li>
-<li>The plugin will add a weather radar panel to your aircraft's cockpit.</li>
-<li>You can access the panel by pressing Shift+5 on your keyboard.</li>
-<li>You can adjust the settings of the radar by using the knobs and buttons on the panel.</li>
-<li>You can see the weather conditions around you in 3D on the radar screen.</li>
-</ol>
-
-<p>You can also uninstall the plugin from any aircraft by using the WxR Editor tool and clicking Uninstall.</p>
-
-<h2>Conclusion</h2>
-
-<p>Captain Sim Weather Radar Crack is a powerful plugin that allows you to use a realistic weather radar with any FSX aircraft. However, it is not available for purchase or support from Captain Sim. You can still find it on some torrent sites, but this comes with many risks and legal issues. We suggest you look for other options that are safer and more reliable.</p>
-
-<p>If you need help with creating amazing flight simulation experiences in FSX, you can check out our online courses and tutorials at FlightSim Academy. We have a team of experts who can teach you everything you need to know about FSX and other flight simulation software. Whether you are a beginner or a professional, we have something for everyone. Join us today and start learning!</p>
-
-
-- CAPTAIN SIM FORUM - CS Weather Radar
-- Captain Sim – Weather Radar Expansion for MS FSX
-- Captain Sim Weather Radar Crack - SoundCloud
-- Captain Sim Weather Radar !NEW! Crack - bullseyebow.com
-
-These are some of the websites that have information about the plugin and how to use it. However, as I mentioned before, these are not official or authorized sources. You should be careful when downloading or using files from these sites. They may contain harmful or illegal content.
-
-I hope you enjoyed reading this article and learned something new. Thank you for your attention and interest. Have a nice day! ?
-<p>Captain Sim Weather Radar Crack is a powerful plugin that allows you to use a realistic weather radar with any FSX aircraft. However, it is not available for purchase or support from Captain Sim. You can still find it on some torrent sites, but this comes with many risks and legal issues. We suggest you look for other options that are safer and more reliable.</p>
-
-<p>If you need help with creating amazing flight simulation experiences in FSX, you can check out our online courses and tutorials at FlightSim Academy. We have a team of experts who can teach you everything you need to know about FSX and other flight simulation software. Whether you are a beginner or a professional, we have something for everyone. Join us today and start learning!</p> 3cee63e6c2<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Fifa 2008 Crack Indir Oyuncehennemi Tek Romagna U8110 Roswel.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Fifa 2008 Crack Indir Oyuncehennemi Tek romagna u8110 roswel</h2><br /><p><b><b>Download Zip</b> » <a href="https://imgfil.com/2uy1r0">https://imgfil.com/2uy1r0</a></b></p><br /><br />
-<br />
-aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
spaces/1line/AutoGPT/autogpt/commands/web_requests.py
DELETED
@@ -1,190 +0,0 @@
-"""Browse a webpage and summarize it using the LLM model"""
-from __future__ import annotations
-
-from urllib.parse import urljoin, urlparse
-
-import requests
-from bs4 import BeautifulSoup
-from requests import Response
-from requests.compat import urljoin
-
-from autogpt.config import Config
-from autogpt.memory import get_memory
-from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
-
-CFG = Config()
-memory = get_memory(CFG)
-
-session = requests.Session()
-session.headers.update({"User-Agent": CFG.user_agent})
-
-
-def is_valid_url(url: str) -> bool:
-    """Check if the URL is valid
-
-    Args:
-        url (str): The URL to check
-
-    Returns:
-        bool: True if the URL is valid, False otherwise
-    """
-    try:
-        result = urlparse(url)
-        return all([result.scheme, result.netloc])
-    except ValueError:
-        return False
-
-
-def sanitize_url(url: str) -> str:
-    """Sanitize the URL
-
-    Args:
-        url (str): The URL to sanitize
-
-    Returns:
-        str: The sanitized URL
-    """
-    return urljoin(url, urlparse(url).path)
-
-
-def check_local_file_access(url: str) -> bool:
-    """Check if the URL is a local file
-
-    Args:
-        url (str): The URL to check
-
-    Returns:
-        bool: True if the URL is a local file, False otherwise
-    """
-    local_prefixes = [
-        "file:///",
-        "file://localhost/",
-        "file://localhost",
-        "http://localhost",
-        "http://localhost/",
-        "https://localhost",
-        "https://localhost/",
-        "http://2130706433",
-        "http://2130706433/",
-        "https://2130706433",
-        "https://2130706433/",
-        "http://127.0.0.1/",
-        "http://127.0.0.1",
-        "https://127.0.0.1/",
-        "https://127.0.0.1",
-        "https://0.0.0.0/",
-        "https://0.0.0.0",
-        "http://0.0.0.0/",
-        "http://0.0.0.0",
-        "http://0000",
-        "http://0000/",
-        "https://0000",
-        "https://0000/",
-    ]
-    return any(url.startswith(prefix) for prefix in local_prefixes)
-
-
-def get_response(
-    url: str, timeout: int = 10
-) -> tuple[None, str] | tuple[Response, None]:
-    """Get the response from a URL
-
-    Args:
-        url (str): The URL to get the response from
-        timeout (int): The timeout for the HTTP request
-
-    Returns:
-        tuple[None, str] | tuple[Response, None]: The response and error message
-
-    Raises:
-        ValueError: If the URL is invalid
-        requests.exceptions.RequestException: If the HTTP request fails
-    """
-    try:
-        # Restrict access to local files
-        if check_local_file_access(url):
-            raise ValueError("Access to local files is restricted")
-
-        # Most basic check if the URL is valid:
-        if not url.startswith("http://") and not url.startswith("https://"):
-            raise ValueError("Invalid URL format")
-
-        sanitized_url = sanitize_url(url)
-
-        response = session.get(sanitized_url, timeout=timeout)
-
-        # Check if the response contains an HTTP error
-        if response.status_code >= 400:
-            return None, f"Error: HTTP {str(response.status_code)} error"
-
-        return response, None
-    except ValueError as ve:
-        # Handle invalid URL format
-        return None, f"Error: {str(ve)}"
-
-    except requests.exceptions.RequestException as re:
-        # Handle exceptions related to the HTTP request
-        # (e.g., connection errors, timeouts, etc.)
-        return None, f"Error: {str(re)}"
-
-
-def scrape_text(url: str) -> str:
-    """Scrape text from a webpage
-
-    Args:
-        url (str): The URL to scrape text from
-
-    Returns:
-        str: The scraped text
-    """
-    response, error_message = get_response(url)
-    if error_message:
-        return error_message
-    if not response:
-        return "Error: Could not get response"
-
-    soup = BeautifulSoup(response.text, "html.parser")
-
-    for script in soup(["script", "style"]):
-        script.extract()
-
-    text = soup.get_text()
-    lines = (line.strip() for line in text.splitlines())
-    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
-    text = "\n".join(chunk for chunk in chunks if chunk)
-
-    return text
-
-
-def scrape_links(url: str) -> str | list[str]:
-    """Scrape links from a webpage
-
-    Args:
-        url (str): The URL to scrape links from
-
-    Returns:
-        str | list[str]: The scraped links
-    """
-    response, error_message = get_response(url)
-    if error_message:
-        return error_message
-    if not response:
-        return "Error: Could not get response"
-    soup = BeautifulSoup(response.text, "html.parser")
-
-    for script in soup(["script", "style"]):
-        script.extract()
-
-    hyperlinks = extract_hyperlinks(soup, url)
-
-    return format_hyperlinks(hyperlinks)
-
-
-def create_message(chunk, question):
-    """Create a message for the user to summarize a chunk of text"""
-    return {
-        "role": "user",
-        "content": f'"""{chunk}""" Using the above text, answer the following'
-        f' question: "{question}" -- if the question cannot be answered using the'
-        " text, summarize the text.",
-    }
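For reference, the deleted module's two public entry points can be exercised as follows. This is a minimal usage sketch, not part of the commit: it assumes the pre-deletion autogpt package (with a configured Config.user_agent) is still importable, and the URL is a placeholder.

```python
# Minimal sketch: exercising the deleted web_requests helpers.
# Assumes the autogpt package shown in the diff above is importable.
from autogpt.commands.web_requests import scrape_links, scrape_text

url = "https://example.com"  # placeholder URL

text = scrape_text(url)    # page text, or an "Error: ..." string
links = scrape_links(url)  # formatted links, or an "Error: ..." string

print(text[:200])
print(links if isinstance(links, str) else links[:5])
```

Note that both helpers signal failure in-band by returning an "Error: ..." string rather than raising, so callers check the return value instead of wrapping the calls in try/except.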
spaces/1line/AutoGPT/autogpt/speech/__init__.py
DELETED
@@ -1,4 +0,0 @@
-"""This module contains the speech recognition and speech synthesis functions."""
-from autogpt.speech.say import say_text
-
-__all__ = ["say_text"]
spaces/1phancelerku/anime-remove-background/3D Paint Brush Free Download Tips and Tricks for 3D Artists.md
DELETED
@@ -1,164 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>3D Paint Brush Free Download: How to Create Amazing 3D Artworks with Free Tools</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>Have you ever wanted to create stunning 3D artworks, but felt intimidated by the complexity and cost of professional software? If so, you are not alone. Many people are fascinated by the possibilities of 3D painting, but don't know where to start or how to afford it.</p>
|
5 |
-
<h2>3d paint brush free download</h2><br /><p><b><b>Download Zip</b> ⇒ <a href="https://jinyurl.com/2uNSgn">https://jinyurl.com/2uNSgn</a></b></p><br /><br />
|
6 |
-
<p>Fortunately, there are some free tools that can help you unleash your creativity and make your own 3D masterpieces. Whether you are a beginner or a pro, you can find a 3D paint brush software that suits your needs and preferences.</p>
|
7 |
-
<p>In this article, we will introduce you to some of the best free 3D paint brush software available online. We will explain what 3D painting is, why it is popular, and what are the benefits of using free software. We will also show you how to download and use each tool, and what features and advantages they offer.</p>
|
8 |
-
<h2>Best Free 3D Paint Brush Software for Beginners and Professionals</h2>
|
9 |
-
<h3>Paint 3D: The easiest way to create 3D models and scenes</h3>
|
10 |
-
<p>If you are looking for a simple and intuitive way to create 3D artworks, Paint 3D is a great option. Paint 3D is a free app that comes pre-installed with Windows 10. It is an updated version of the classic Paint program, but with added features for 3D creation.</p>
|
11 |
-
<h4>Features and advantages of Paint 3D</h4>
|
12 |
-
<ul>
|
13 |
-
<li>You can easily create 3D models from scratch or use predefined shapes and stickers.</li>
|
14 |
-
<li>You can paint on any surface or texture with different brushes, colors, and effects.</li>
|
15 |
-
<li>You can add depth, lighting, shadows, and perspective to your 3D scenes.</li>
|
16 |
-
<li>You can import and export your 3D creations in various formats, such as PNG, JPEG, GIF, BMP, FBX, STL, OBJ, GLB, etc.</li>
|
17 |
-
<li>You can share your 3D artworks with others on social media or Remix 3D, a community platform for 3D enthusiasts.</li>
|
18 |
-
</ul>
|
19 |
-
<h4>How to download and use Paint 3D</h4>
|
20 |
-
<ol>
|
21 |
-
<li>To download Paint 3D, go to the Microsoft Store app on your Windows 10 device and search for "Paint 3D". Alternatively, you can click on this link to go directly to the download page.</li>
|
22 |
-
<li>To use Paint 3D, open the app and click on "New" to start a new project. You can also click on "Open" to browse your existing files or "Menu" to access more options.</li>
|
23 |
-
<li>To create a 3D model, click on the "3D shapes" icon on the top toolbar. You can choose from different categories of shapes, such as basic shapes, animals, people, vehicles, etc. You can also use the "Freehand" tool to draw your own shape.</li>
|
24 |
-
<li>To paint on your model, click on the "Brushes" icon on the top toolbar. You can choose from different types of brushes, such as matte, gloss, dull metal, polished metal, etc. You can also adjust the size, opacity, color, and texture of your brush.</li>
|
25 |
-
<li>To edit your model , click on the "Select" icon on the top toolbar. You can move, rotate, scale, duplicate, or delete your model. You can also use the "Magic select" tool to cut out a part of your model and place it elsewhere.</li>
|
26 |
-
<li>To create a 3D scene, click on the "Canvas" icon on the right sidebar. You can change the background color, image, or transparency of your canvas. You can also adjust the view angle, zoom level, and grid size of your scene.</li>
|
27 |
-
<li>To add effects to your scene, click on the "Effects" icon on the right sidebar. You can choose from different types of effects, such as lighting, environment, filters, etc. You can also customize the intensity, direction, and color of your effects.</li>
|
28 |
-
<li>To save your project, click on "Menu" and then "Save as". You can choose to save your file as a 2D image or a 3D model. You can also specify the name, location, and format of your file.</li>
|
29 |
-
<li>To share your project, click on "Menu" and then "Share". You can choose to share your file via email, social media, or Remix 3D. You can also copy the link or embed code of your file.</li>
|
30 |
-
</ol>
|
31 |
-
<h3>Blender: The most powerful and versatile 3D creation suite</h3>
|
32 |
-
<p>If you are looking for a more advanced and comprehensive way to create 3D artworks, Blender is a perfect choice. Blender is a free and open source software that can handle all aspects of 3D creation, such as modeling, sculpting, painting, animation, rendering, simulation, compositing, video editing, and more.</p>
|
33 |
-
<h4>Features and advantages of Blender</h4>
|
34 |
-
<ul>
|
35 |
-
<li>You can create complex and realistic 3D models and scenes with various tools and modifiers.</li>
|
36 |
-
<li>You can paint on your models with different brushes, textures, and materials.</li>
|
37 |
-
<li>You can animate your models with keyframes, curves, bones, drivers, etc.</li>
|
38 |
-
<li>You can render your scenes with high-quality engines, such as Cycles and Eevee.</li>
|
39 |
-
<li>You can simulate physical phenomena, such as fluid, smoke, fire, cloth, hair, etc.</li>
|
40 |
-
<li>You can edit and composite your videos with nodes, filters, transitions, etc.</li>
|
41 |
-
<li>You can import and export your 3D creations in many formats, such as OBJ, FBX, STL, GLTF , etc.</li>
|
42 |
-
<li>You can access a large community of users and developers who provide tutorials, support, feedback, and add-ons.</li>
|
43 |
-
</ul>
|
44 |
-
<h4>How to download and use Blender</h4>
|
45 |
-
<ol>
|
46 |
-
<li>To download Blender, go to the official website and click on the "Download Blender" button. Alternatively, you can click on this link to go directly to the download page.</li>
|
47 |
-
<li>To use Blender, open the software and choose a workspace from the top menu. You can select from different workspaces, such as layout, modeling, sculpting, UV editing, texture painting, shading, animation, rendering, compositing, video editing, etc.</li>
|
48 |
-
<li>To create a 3D model, click on the "Add" menu on the top left corner and choose an object type. You can add basic shapes, such as cubes, spheres, cylinders, etc., or more complex objects, such as text, curves, meshes, etc.</li>
|
49 |
-
<li>To paint on your model, switch to the "Texture Paint" workspace from the top menu. You can choose a brush from the left panel and adjust its settings, such as radius, strength, color, texture, etc. You can also create and apply materials to your model from the right panel.</li>
|
50 |
-
<li>To edit your model , click on the "Edit Mode" button on the top left corner. You can use various tools and commands to manipulate your model, such as move, rotate, scale, extrude, inset, loop cut, etc. You can also use modifiers from the right panel to apply non-destructive transformations to your model, such as subdivision surface, mirror, array, boolean, etc.</li>
|
51 |
-
<li>To create a 3D scene, click on the "Layout" workspace from the top menu. You can add more objects to your scene and arrange them in the 3D viewport. You can also add lights, cameras, and backgrounds to your scene from the "Add" menu.</li>
|
52 |
-
<li>To add effects to your scene, switch to the "Shading" workspace from the top menu. You can use nodes to create and edit materials for your objects. You can also use nodes to create and edit world settings for your scene, such as color, environment texture, volume, etc.</li>
|
53 |
-
<li>To save your project, click on "File" and then "Save As". You can choose a name and location for your file. You can also save your file as a Blender file (.blend) or a compressed Blender file (.blend.gz).</li>
|
54 |
-
<li>To share your project, click on "File" and then "Export". You can choose a format for your file, such as OBJ, FBX, STL, GLTF , etc. You can also specify the name, location, and settings of your file. You can also share your file via email, social media, or Blender Cloud, a platform for Blender users and developers.</li>
|
55 |
-
</ol>
|
56 |
-
<h3>SketchUp: The simplest and fastest way to design 3D objects and environments</h3>
|
57 |
-
<p>If you are looking for a quick and easy way to design 3D objects and environments, SketchUp is an ideal choice. SketchUp is a free and web-based software that allows you to create and edit 3D models with simple tools and intuitive interface.</p>
|
58 |
-
<p>3d paint brush software free download<br />
|
59 |
-
3d paint brush vector free download<br />
|
60 |
-
3d paint brush model free download<br />
|
61 |
-
3d paint brush photoshop free download<br />
|
62 |
-
3d paint brush illustrator free download<br />
|
63 |
-
3d paint brush clipart free download<br />
|
64 |
-
3d paint brush png free download<br />
|
65 |
-
3d paint brush svg free download<br />
|
66 |
-
3d paint brush icon free download<br />
|
67 |
-
3d paint brush logo free download<br />
|
68 |
-
3d paint brush animation free download<br />
|
69 |
-
3d paint brush blender free download<br />
|
70 |
-
3d paint brush maya free download<br />
|
71 |
-
3d paint brush sketchup free download<br />
|
72 |
-
3d paint brush cinema 4d free download<br />
|
73 |
-
3d paint brush after effects free download<br />
|
74 |
-
3d paint brush corel draw free download<br />
|
75 |
-
3d paint brush gimp free download<br />
|
76 |
-
3d paint brush inkscape free download<br />
|
77 |
-
3d paint brush online free download<br />
|
78 |
-
3d paint brush app free download<br />
|
79 |
-
3d paint brush tool free download<br />
|
80 |
-
3d paint brush set free download<br />
|
81 |
-
3d paint brush pack free download<br />
|
82 |
-
3d paint brush bundle free download<br />
|
83 |
-
3d paint brush collection free download<br />
|
84 |
-
3d paint brush kit free download<br />
|
85 |
-
3d paint brush design free download<br />
|
86 |
-
3d paint brush art free download<br />
|
87 |
-
3d paint brush texture free download<br />
|
88 |
-
3d paint brush pattern free download<br />
|
89 |
-
3d paint brush background free download<br />
|
90 |
-
3d paint brush wallpaper free download<br />
|
91 |
-
3d paint brush mockup free download<br />
|
92 |
-
3d paint brush template free download<br />
|
93 |
-
3d paint brush tutorial free download<br />
|
94 |
-
3d paint brush guide free download<br />
|
95 |
-
3d paint brush tips free download<br />
|
96 |
-
3d paint brush tricks free download<br />
|
97 |
-
3d paint brush techniques free download<br />
|
98 |
-
3d paint brush examples free download<br />
|
99 |
-
3d paint brush samples free download<br />
|
100 |
-
3d paint brush projects free download<br />
|
101 |
-
3d paint brush ideas free download<br />
|
102 |
-
3d paint brush inspiration free download<br />
|
103 |
-
best 3d paint brush free download</p>
|
104 |
-
<h4>Features and advantages of SketchUp</h4>
|
105 |
-
<ul>
|
106 |
-
<li>You can create 3D models from scratch or use thousands of pre-made models from the 3D Warehouse, a library of free 3D models.</li>
|
107 |
-
<li>You can draw on your models with different tools, such as line, arc, circle, rectangle, polygon, etc.</li>
|
108 |
-
<li>You can modify your models with different tools, such as push/pull, move, rotate, scale, offset, follow me, etc.</li>
|
109 |
-
<li>You can add materials, colors, textures, and styles to your models.</li>
|
110 |
-
<li>You can import and export your 3D models in various formats, such as SKP, STL, OBJ, DWG, DXF, etc.</li>
|
111 |
-
<li>You can share your 3D models with others on the 3D Warehouse or Trimble Connect, a cloud service for collaboration and project management.</li>
|
112 |
-
</ul>
|
113 |
-
<h4>How to download and use SketchUp</h4>
|
114 |
-
<ol>
|
115 |
-
<li>To download SketchUp, go to the official website and click on the "Start Modeling" button. Alternatively, you can click on this link to go directly to the web app.</li>
|
116 |
-
<li>To use SketchUp , open the web app and sign in with your Google or Trimble account. You can also create a new account for free.</li>
|
117 |
-
<li>To create a 3D model, click on the "File" menu and then "New". You can also click on the "Open" menu to browse your existing files or the "3D Warehouse" menu to access the library of free models.</li>
|
118 |
-
<li>To draw on your model, click on the "Tools" menu and choose a tool. You can use different tools to draw lines, curves, shapes, etc. You can also use the "Inferencing" feature to snap your drawing to points, edges, faces, axes, etc.</li>
|
119 |
-
<li>To modify your model, click on the "Tools" menu and choose a tool. You can use different tools to push/pull, move, rotate, scale, offset, follow me, etc. You can also use the "Entity Info" panel to edit the properties of your model, such as name, layer, dimensions, etc.</li>
|
120 |
-
<li>To add materials, colors, textures, and styles to your model, click on the "Materials", "Colors", "Textures", or "Styles" menus. You can choose from different categories of materials, colors, textures, and styles. You can also create and apply your own materials, colors, textures, and styles.</li>
|
121 |
-
<li>To save your project, click on the "File" menu and then "Save". You can choose a name and location for your file. You can also save your file as a SketchUp file (.skp) or a compressed SketchUp file (.skb).</li>
|
122 |
-
<li>To share your project, click on the "File" menu and then "Share". You can choose to share your file via email or Trimble Connect. You can also upload your file to the 3D Warehouse or download it in another format.</li>
|
123 |
-
</ol>
|
124 |
-
<h2>Conclusion</h2>
|
125 |
-
<p>In this article, we have shown you how to create amazing 3D artworks with free tools. We have introduced you to three of the best free 3D paint brush software available online: Paint 3D, Blender, and SketchUp. We have explained what 3D painting is, why it is popular, and what are the benefits of using free software. We have also shown you how to download and use each tool, and what features and advantages they offer.</p>
|
126 |
-
<p>Now that you have learned how to create 3D artworks with free tools, you can start experimenting with different styles and techniques. You can also explore more resources and tutorials online to improve your skills and knowledge. You can also share your 3D creations with others and get feedback and inspiration.</p>
|
127 |
-
<p>So what are you waiting for? Download one of these free 3D paint brush software today and unleash your creativity!</p>
|
128 |
-
<h2>FAQs</h2>
|
129 |
-
<h4>What is 3D painting?</h4>
|
130 |
-
<p>3D painting is a process of creating 3D models and scenes using digital tools that simulate painting on a surface or texture.</p>
|
131 |
-
<h4>What are the benefits of using free 3D paint brush software?</h4>
|
132 |
-
<p>Some of the benefits of using free 3D paint brush software are:</p>
|
133 |
-
<ul>
|
134 |
-
<li>You can save money and time by not having to buy or install expensive and complex software.</li>
|
135 |
-
<li>You can access your projects from any device and location with an internet connection.</li>
|
136 |
-
<li>You can learn and practice 3D painting without any risk or commitment.</li>
|
137 |
-
<li>You can enjoy the features and updates of the software without any cost or hassle.</li>
|
138 |
-
</ul>
|
139 |
-
<h4>What are some tips for creating 3D artworks with free tools?</h4>
|
140 |
-
<p>Some of the tips for creating 3D artworks with free tools are:</p>
|
141 |
-
<ul>
|
142 |
-
<li>Start with a simple idea and sketch it out on paper or screen.</li>
|
143 |
-
<li>Choose a tool that matches your skill level and style preference.</li>
|
144 |
-
<li>Use reference images and tutorials to guide you through the process.</li>
|
145 |
-
<li>Experiment with different tools , colors, textures, effects, etc. to create different looks and moods.</li>
|
146 |
-
<li>Save and export your files in appropriate formats and quality.</li>
|
147 |
-
<li>Share and showcase your 3D artworks with others and get feedback and inspiration.</li>
|
148 |
-
</ul>
|
149 |
-
<h4>What are some examples of 3D artworks created with free tools?</h4>
<p>Some examples of 3D artworks created with free tools are:</p>
<ul>
<li>A 3D model of a car created with Paint 3D</li>
<li>A 3D animation of a character created with Blender</li>
<li>A 3D design of a house created with SketchUp</li>
</ul>
<h4>What are some other free 3D paint brush software that you can try?</h4>
<p>Some other free 3D paint brush tools that you can try are:</p>
<ul>
<li>Tilt Brush: A VR app that lets you paint in 3D space.</li>
<li>Meshmixer: A 3D sculpting and painting tool that lets you mix and edit 3D meshes.</li>
<li>Sculptris: A 3D sculpting and painting tool that lets you create organic shapes and textures.</li>
</ul>
spaces/1phancelerku/anime-remove-background/And Blue Download The Ultimate Guide to Playing Android Games on PC.md
DELETED
@@ -1,126 +0,0 @@
<h1>What is and blue download?</h1>
<p>If you are looking for a way to enjoy your favorite Android apps and games on your PC or Mac, you might have heard of <strong>and blue download</strong>. But what is it exactly, and how does it work? In this article, we will explain everything you need to know about this amazing software that lets you run Android on your computer.</p>
<h2>and blue download</h2><p><b>DOWNLOAD</b> ✒ <a href="https://jinyurl.com/2uNPvy">https://jinyurl.com/2uNPvy</a></p>
<h2>Why do you need and blue download?</h2>
<p>There are many reasons why you might want to use <strong>and blue download</strong> on your PC or Mac. Here are some of them:</p>
<ul>
<li>You can access thousands of Android apps and games that are not available for Windows or Mac, such as WhatsApp, Instagram, PUBG Mobile, Clash of Clans, etc.</li>
<li>You can play Android games with better graphics, performance, and controls on your computer. You can also use your keyboard, mouse, or gamepad for a more immersive experience.</li>
<li>You can use the same apps on both Android and Windows or Mac, which means you can sync your data, transfer files, and switch devices easily.</li>
<li>You can test new apps or games before installing them on your phone or tablet. You can also use multiple accounts or instances of the same app with <strong>and blue download</strong>.</li>
<li>You can enjoy a bigger screen, more storage space, longer battery life, and a faster internet connection when using <strong>and blue download</strong> on your computer.</li>
</ul>
<h2>How to download and install <strong>and blue download</strong>?</h2>
<h3>Downloading <strong>and blue download</strong></h3>
<p>To download <strong>and blue download</strong>, you can follow these simple steps:</p>
<ol>
<li>Go to the official website of <strong>and blue download</strong> at bluestacks.com.</li>
<li>Select the version of <strong>and blue download</strong> that suits your needs. You can choose between BlueStacks 5 and BlueStacks 4, depending on your operating system, hardware specifications, and preferences.</li>
<li>Click on the "Download" button to start downloading the .exe (for Windows) or .dmg (for Mac) file.</li>
<li>Wait for the file to finish downloading. It might take a few minutes depending on your internet speed.</li>
</ol>
<h3>Installing <strong>and blue download</strong></h3>
<p>To install <strong>and blue download</strong>, you can follow these simple steps:</p>
<p>and blue download bluestacks app player<br />
and blue download windows 11<br />
and blue download android games on pc<br />
and blue download nougat 64-bit<br />
and blue download pie 64-bit<br />
and blue download android 11<br />
and blue download cnet<br />
and blue download for mac<br />
and blue download apk<br />
and blue download offline installer<br />
and blue download old version<br />
and blue download latest version<br />
and blue download for windows 10<br />
and blue download for windows 7<br />
and blue download for windows 8<br />
and blue download for linux<br />
and blue download for chromebook<br />
and blue download free fire<br />
and blue download pubg mobile<br />
and blue download clash of clans<br />
and blue download among us<br />
and blue download call of duty mobile<br />
and blue download garena free fire max<br />
and blue download brawl stars<br />
and blue download candy crush saga<br />
and blue download minecraft pocket edition<br />
and blue download roblox<br />
and blue download fortnite<br />
and blue download pokemon go<br />
and blue download whatsapp messenger<br />
and blue download instagram<br />
and blue download tiktok<br />
and blue download zoom cloud meetings<br />
and blue download netflix<br />
and blue download spotify music<br />
and blue download youtube music<br />
and blue download amazon prime video<br />
and blue download disney plus hotstar<br />
and blue download hulu live tv & streaming <br />
and blue download facebook lite</p>
<ol>
<li>Open the downloaded file by double-clicking on it.</li>
<li>Follow the instructions that appear on the screen. You might need to grant some permissions or accept some terms and conditions.</li>
<li>Wait for the installation process to complete. It might take a few minutes depending on your computer speed.</li>
<li>Once the installation is done, you can launch <strong>and blue download</strong> by clicking on its icon on your desktop or start menu.</li>
</ol>
<h2>How to use <strong>and blue download</strong>?</h2>
<h3>Running Android apps and games on PC or Mac</h3>
<p>To run Android apps and games on your PC or Mac with <strong>and blue download</strong>, you can follow these simple steps:</p>
<ol>
<li>Open <strong>and blue download</strong> and sign in with your Google account. You can also create a new one if you don't have one.</li>
<li>Browse the app store or the game center to find the apps or games you want to install. You can also search for them by name or category.</li>
<li>Click on the "Install" button to download and install the apps or games on <strong>and blue download</strong>.</li>
<li>Once the installation is done, you can launch the apps or games by clicking on their icons on the home screen or in the library.</li>
<li>Enjoy using your Android apps and games on your PC or Mac with <strong>and blue download</strong>.</li>
</ol>
<h3>Customizing and optimizing <strong>and blue download</strong></h3>
<p>To customize and optimize <strong>and blue download</strong> to suit your needs, you can follow these simple steps:</p>
<ol>
<li>Open <strong>and blue download</strong> and click on the gear icon on the top right corner to access the settings menu.</li>
<li>Select the option that you want to adjust, such as display, engine, preferences, keyboard, gamepad, etc.</li>
<li>Make the changes that you want, such as resolution, graphics mode, CPU cores, RAM allocation, language, shortcuts, etc.</li>
<li>Save the changes and restart <strong>and blue download</strong> if needed.</li>
<li>Enjoy using <strong>and blue download</strong> with your customized and optimized settings.</li>
</ol>
<h2>How to update and troubleshoot <strong>and blue download</strong>?</h2>
<h3>Updating <strong>and blue download</strong></h3>
<p>To update <strong>and blue download</strong>, you can follow these simple steps:</p>
<ol>
<li>Open <strong>and blue download</strong> and click on the hamburger icon on the top left corner to access the menu.</li>
<li>Select "Check for updates" and wait for <strong>and blue download</strong> to scan for any available updates.</li>
<li>If there are any updates, click on the "Update now" button to start downloading and installing them.</li>
<li>Wait for the update process to complete. It might take a few minutes depending on your internet speed and computer speed.</li>
<li>Once the update is done, you can enjoy using the latest version of <strong>and blue download</strong>.</li>
</ol>
<h3>Troubleshooting <strong>and blue download</strong></h3>
<p>If you encounter any issues or problems with <strong>and blue download</strong>, you can follow these simple steps:</p>
<ol>
<li>Open <strong>and blue download</strong> and click on the question mark icon on the top right corner to access the help center.</li>
<li>Select the category that matches your issue, such as installation, performance, compatibility, etc.</li>
<li>Browse the articles or videos that provide solutions or tips for your issue. You can also search for them by keywords or phrases.</li>
<li>If you still need help, you can contact the support team by clicking on the "Contact us" button. You can also visit the official forum or community to get help from other users.</li>
<li>Solve your issue and enjoy using <strong>and blue download</strong>.</li>
</ol>
<h2>Conclusion</h2>
<p>In conclusion, <i>and blue download</i> is a powerful and versatile piece of software that allows you to run Android apps and games on your PC or Mac. It has many benefits and advantages, such as better graphics, performance, controls, compatibility, and convenience. It is also easy to download, install, use, update, and troubleshoot. If you are looking for a way to enjoy your favorite Android apps and games on your computer, you should definitely give <i>and blue download</i> a try. You won't regret it!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions and answers about <i>and blue download</i>:</p>
<ul>
<li><strong>Is <i>and blue download</i> free?</strong><br>
Yes, <i>and blue download</i> is free to download and use. However, some features or options might require a premium subscription or a one-time purchase.</li>
<li><strong>Is <i>and blue download</i> safe?</strong><br>
Yes, <i>and blue download</i> is safe and secure. It does not contain any malware or viruses. It also respects your privacy and does not collect or share your personal data without your consent.</li>
<li><strong>Is <i>and blue download</i> legal?</strong><br>
Yes, <i>and blue download</i> is legal. It does not violate any laws or regulations. However, you should only use it for personal and non-commercial purposes. You should also respect the intellectual property rights of the app or game developers and publishers.</li>
<li><strong>What are the system requirements for <i>and blue download</i>?</strong><br>
The system requirements for <i>and blue download</i> vary depending on the version you choose. For BlueStacks 5, you need at least Windows 7 or Mac OS 10.12, 4 GB of RAM, 5 GB of disk space, and an Intel or AMD processor. For BlueStacks 4, you need at least Windows 7 or Mac OS 10.11, 2 GB of RAM, 5 GB of disk space, and an Intel or AMD processor.</li>
<li><strong>How can I contact the support team of <i>and blue download</i>?</strong><br>
You can contact the support team of <i>and blue download</i> by clicking on the "Contact us" button in the help center. You can also visit the official website, forum, or community to get more information and assistance.</li>
</ul>
spaces/1phancelerku/anime-remove-background/Android 11 Emojis Download How to Get the Latest Unicode 13 Emojis on Any Device.md
DELETED
@@ -1,158 +0,0 @@
<h1>Android 11 Emojis Download: How to Get the Latest and Cutest Emojis on Your Phone</h1>
<p>Emojis are more than just cute icons that you can use to spice up your messages and social media posts. They are also powerful tools that can convey your emotions, intentions, and personality in a fun and creative way. That's why many people are always excited to get new emojis on their phones.</p>
<p>If you are one of them, you will be happy to know that Android 11, the latest version of Google's operating system, comes with 117 brand-new emojis and a significant number of design changes for the existing ones. These emojis are based on Unicode's Emoji 13.0 recommendations, which were published earlier this year.</p>
<h2>android 11 emojis download</h2><p><b>DOWNLOAD</b> ••• <a href="https://jinyurl.com/2uNNVT">https://jinyurl.com/2uNNVT</a></p>
<p>In this article, we will tell you everything you need to know about Android 11 emojis, including what they are, why they are popular, and how to get them on your phone. Let's get started!</p>
<h2>What are Android 11 Emojis?</h2>
<p>Android 11 emojis are the new set of graphical symbols that Google has added to its operating system as part of its latest update. These emojis cover a wide range of categories, such as smileys, people, animals, food, objects, and symbols.</p>
<p>Some of the new emojis are more inclusive versions of existing ones, such as gender and skin-tone variants of the Feeding Baby, Person With Veil, and Person in Tuxedo emojis. The transgender flag, the transgender symbol, and a new People Hugging emoji have also joined the ranks of Android 11 emoji.</p>
<p>Some of the new emojis are also more playful and expressive, such as Smiling Face with Tear, Disguised Face, Pinched Fingers, Boomerang, Bubble Tea, Dodo, Seal, Feather, Piñata, Magic Wand, Nesting Dolls, Anatomical Heart, Lungs, and many more.</p>
<p>You can see all the new emojis in Emojipedia's Android 11 emoji changelog.</p>
<p>android 11 new emojis install root magisk module<br />
android 11 emoji changelog emojipedia<br />
google noto color emoji android 11<br />
how to get android 11 emojis on older versions<br />
android 11 emoji font file download<br />
android 11 emoji update release date<br />
android 11 unicode 13 emojis list<br />
android 11 emoji design changes<br />
android 11 emoji apk download<br />
android 11 emoji pack for gboard<br />
android 11 emoji support for whatsapp<br />
android 11 emoji mod for samsung devices<br />
android 11 emoji comparison with ios<br />
android 11 emoji keyboard app<br />
android 11 emoji zip file download<br />
how to enable android 11 emojis on any device<br />
android 11 emoji preview images<br />
android 11 emoji beta testing<br />
android 11 emoji magisk module xda<br />
android 11 emoji font ttf download<br />
how to use android 11 emojis on instagram<br />
android 11 emoji flashable zip download<br />
android 11 emoji meaning and usage<br />
android 11 emoji review and feedback<br />
android 11 emoji compatibility with other platforms<br />
how to uninstall android 11 emojis from root<br />
android 11 emoji backup and restore guide<br />
android 11 emoji patch for older devices<br />
android 11 emoji font converter tool<br />
how to customize android 11 emojis with root<br />
android 11 emoji font changer app<br />
how to access android 11 emojis on telegram<br />
android 11 emoji font apk download<br />
how to fix android 11 emojis not showing up<br />
android 11 emoji font for windows download<br />
how to create your own android 11 emojis with root<br />
android 11 emoji font for mac download<br />
how to share android 11 emojis with non-root users<br />
android 11 emoji font for linux download<br />
how to update gboard with android 11 emojis <br />
best apps to enjoy android 11 emojis <br />
how to install fonts with new emojis on Android <br />
how to get the latest Android emojis without updating <br />
what are the most popular Android 11 emojis <br />
how to make your own stickers with Android 11 emojis <br />
how to use Android Studio to create custom emojis <br />
how to change the color of Android emojis <br />
how to get Android emojis on iPhone <br />
how to get iPhone emojis on Android</p>
<p>The new emojis also have a different style than the previous versions. They have reduced outlines, subtle shading, and a more polished look. They also have some design changes that are inspired by popular designs from the past.</p>
<p>You can compare the new emojis with previous versions and other platforms in Emojipedia's side-by-side comparison tables.</p>
<p>Here are some of the most notable new emojis and their meanings:</p>
<ul>
<li><strong>🥲 Smiling Face with Tear</strong>: This emoji shows a face with a single tear rolling down its cheek while smiling. It can be used to express gratitude, relief, happiness, or sadness.</li>
<li><strong>🥸 Disguised Face</strong>: This emoji shows a face with glasses, a fake nose, and a mustache. It can be used to indicate hiding, joking, or spying.</li>
<li><strong>🤌 Pinched Fingers</strong>: This emoji shows a hand gesture that is commonly associated with Italian culture. It can be used to express emphasis, frustration, or disbelief.</li>
<li><strong>🪃 Boomerang</strong>: This emoji shows a curved wooden stick that is thrown and returns to the thrower. It can be used to symbolize coming back, repeating, or Australia.</li>
<li><strong>🧋 Bubble Tea</strong>: This emoji shows a cup of tea with tapioca pearls at the bottom. It can be used to represent a popular Asian drink, sweetness, or milk tea.</li>
<li><strong>🦤 Dodo</strong>: This emoji shows a large flightless bird that is extinct. It can be used to refer to something that is outdated, rare, or extinct.</li>
<li><strong>🦭 Seal</strong>: This emoji shows a marine mammal with flippers and whiskers. It can be used to represent cuteness, water animals, or clapping.</li>
<li><strong>🪶 Feather</strong>: This emoji shows a single feather from a bird. It can be used to symbolize lightness, softness, or writing.</li>
<li><strong>🪅 Piñata</strong>: This emoji shows a colorful paper animal that is filled with candy and hit with a stick. It can be used to represent parties, celebrations, or fun.</li>
<li><strong>🪄 Magic Wand</strong>: This emoji shows a thin stick with a star at the end. It can be used to indicate magic, wishes, or fantasy.</li>
<li><strong>🪆 Nesting Dolls</strong>: This emoji shows a set of wooden dolls that fit inside each other. It can be used to represent Russian culture, layers, or surprises.</li>
<li><strong>🫀 Anatomical Heart</strong>: This emoji shows a realistic depiction of a human heart. It can be used to express health, love, or science.</li>
<li><strong>🫁 Lungs</strong>: This emoji shows a pair of human lungs. It can be used to represent breathing, health, or life.</li>
</ul>
<h2>Why are Android 11 Emojis Popular?</h2>
<p>Android 11 emojis are popular because they offer more ways for users to communicate and express themselves on the internet and through their devices. Emojis are not just simple pictures; they are also rich in meaning and context. They can convey emotions, attitudes, opinions, and intentions that words alone cannot capture.</p>
<p>Android 11 emojis are also popular because they reflect the diversity, inclusivity, and creativity of the users and the world around them. The new emojis include more options for gender and skin tone variations, as well as more representation for different cultures, lifestyles, and identities. The new emojis also allow users to create their own combinations and variations using Emoji Kitchen on Gboard or other keyboard apps.</p>
<p>Android 11 emojis are also popular because they have received positive reviews and feedback from users and experts alike. Many people have praised the new emojis for their improved design, quality, and consistency. Many people have also expressed their delight and excitement over the new emojis and how they can use them in their daily conversations and interactions.</p>
<h2>How to Get Android 11 Emojis on Your Phone?</h2>
<p>If you want to get Android 11 emojis on your phone, you have several options depending on your device model and software version. Here are some of the methods you can try:</p>
<h3>Update to the Latest Android Version</h3>
<p>The easiest way to get Android 11 emojis on your phone is to update your device to the latest Android version that supports them. This means you need to have Android 11.0 or higher on your phone. To check your current Android version and update it if possible, follow these steps:</p>
<ol>
<li>Open the Settings app on your phone.</li>
<li>Scroll down and tap on System.</li>
<li>Tap on Advanced.</li>
<li>Tap on System Update.</li>
<li>Check if there is an available update for your device. If there is one, tap on Download and Install.</li>
<li>Wait for the update to finish and restart your phone.</li>
</ol>
<p>After updating your device, you should be able to see and use the new emojis on your default keyboard app and any app that supports them.</p>
<h3>Use Emoji Kitchen on Gboard</h3>
<p>If you don't have Android 11.0 or higher on your phone, you can still use some of the new emojis by using Emoji Kitchen on Gboard, Google's official keyboard app. Emoji Kitchen is a feature that lets you create emoji mashups by combining two emojis. For example, you can combine 😊 Smiling Face and 🥲 Smiling Face with Tear into a single sticker that blends the two.</p>
<p>To use Emoji Kitchen on Gboard, follow these steps:</p>
<ol>
<li>Download and install Gboard from the Google Play Store if you don't have it already.</li>
<li>Open the Settings app on your phone and tap on System.</li>
<li>Tap on Languages & input.</li>
<li>Tap on Virtual keyboard.</li>
<li>Tap on Manage keyboards.</li>
<li>Enable Gboard as your default keyboard app.</li>
<li>Open any app that supports emojis and tap on the text field.</li>
<li>Tap on the emoji icon on the bottom left corner of the keyboard.</li>
<li>Select any emoji that has a small dot on the bottom right corner. This means that it has a mashup option.</li>
<li>You will see a list of emoji mashups that you can choose from. Tap on any one that you like and it will be inserted into the text field.</li>
</ol>
<p>You can also create your own emoji mashups by tapping on two emojis in a row. For example, you can tap on 😊 Smiling Face and then 🥲 Smiling Face with Tear to get a mashup of the two.</p>
<p>Note that not all emojis have mashup options and not all apps support emoji mashups, so check whether a given app accepts them before relying on the feature.</p>
<h3>Install a New Keyboard App</h3>
<p>Another way to get Android 11 emojis on your phone is to install a new keyboard app that has them. There are many keyboard apps that offer different emoji styles and features, such as SwiftKey, Fleksy, TouchPal, and more. You can find them on the Google Play Store or other sources.</p>
<p>To install a new keyboard app, follow these steps:</p>
<ol>
<li>Download and install your preferred keyboard app from the Google Play Store or other sources.</li>
<li>Open the Settings app on your phone and tap on System.</li>
<li>Tap on Languages & input.</li>
<li>Tap on Virtual keyboard.</li>
<li>Tap on Manage keyboards.</li>
<li>Enable your new keyboard app as your default keyboard app.</li>
<li>Open any app that supports emojis and tap on the text field.</li>
<li>Tap on the emoji icon on the bottom left corner of the keyboard or wherever it is located in your new keyboard app.</li>
<li>Select any emoji that you want to use and it will be inserted into the text field.</li>
</ol>
<p>Note that some keyboard apps may not have all the new emojis or may have different designs for them. You can check the description and reviews of the keyboard apps before installing them to see what they offer.</p>
<h3>Bonus Method: Install the New Emojis on Older Android Versions with Root</h3>
<p>If you have an older Android version that cannot be updated to Android 11.0 or higher, and you have root access to your device, you can still install the new emojis using a Magisk module. Magisk is a tool that allows you to modify your system without affecting its integrity. A Magisk module is a package that contains modifications for your system, such as fonts, icons, sounds, etc.</p>
<p>To install the new emojis using a Magisk module, follow these steps:</p>
<ol>
<li>Download and install Magisk from its official website if you don't have it already. Make sure you follow the instructions carefully and back up your data before proceeding.</li>
<li>Download a Magisk module that packages the Android 11 emoji font (such modules are shared on the XDA forums).</li>
<li>Open the Magisk Manager app on your phone and tap on the menu icon on the top left corner of the screen.</li>
<li>Tap on Modules.</li>
<li>Tap on the plus icon on the bottom of the screen and select the Magisk module file that you downloaded.</li>
<li>Wait for the installation to finish and reboot your phone.</li>
</ol>
<p>After rebooting your device, you should be able to see and use the new emojis on your default keyboard app and any app that supports them.</p>
<p>Note that this method requires root access to your device, which may void your warranty, expose your device to security risks, or cause instability issues. Proceed at your own risk and responsibility.</p>
<h1>Conclusion</h1>
<p>Android 11 emojis are the latest and cutest emojis that you can get on your phone. They are based on Unicode's Emoji 13.0 recommendations and have a new style and design. They also include more options for diversity, inclusivity, and creativity.</p>
<p>You can get Android 11 emojis on your phone by updating to the latest Android version, using Emoji Kitchen on Gboard, installing a new keyboard app, or installing a Magisk module if you have root access. Each method has its own advantages and disadvantages, so choose the one that suits your needs and preferences.</p>
<p>We hope this article has helped you learn more about Android 11 emojis and how to get them on your phone. Now go ahead and try them out and have fun with them!</p>
<h2>FAQs</h2>
<p>Here are some of the frequently asked questions and answers about Android 11 emojis:</p>
<h3>Q: How many new emojis are there in Android 11?</h3>
<p>A: There are 117 new emojis in Android 11, which are part of Unicode's Emoji 13.0 recommendations. These include 62 new emoji characters and 55 new gender and skin tone variants.</p>
<h3>Q: What are some of the best apps to use Android 11 emojis in?</h3>
<p>A: Some of the best apps for using Android 11 emojis are WhatsApp, Telegram, Instagram, Twitter, Facebook, Snapchat, TikTok, and Gmail. These apps support the new emojis and allow you to send and receive them with ease.</p>
<h3>Q: How can I see Android 11 emojis on other devices or platforms?</h3>
<p>A: You can see Android 11 emojis on other devices or platforms by using a compatible app or browser that supports them. For example, you can use the Chrome, Firefox, or Edge browsers to view Android 11 emojis on websites. You can also use Emojipedia or other online tools to view Android 11 emojis on any device or platform.</p>
<h3>Q: How can I customize Android 11 emojis?</h3>
<p>A: You can customize Android 11 emojis by using Emoji Kitchen on Gboard or other keyboard apps that allow you to create emoji mashups. You can also use emoji stickers, filters, or effects in some apps to enhance your emoji experience.</p>
<h3>Q: How can I delete or disable Android 11 emojis?</h3>
<p>A: You can delete or disable Android 11 emojis by uninstalling or disabling the keyboard app that has them. You can also revert to an older Android version that does not have them if you have root access. However, we do not recommend doing this as it may cause compatibility or security issues.</p>
spaces/2023Liu2023/bingo/src/components/ui/codeblock.tsx
DELETED
@@ -1,142 +0,0 @@
'use client'

import { FC, memo } from 'react'
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter'
import { coldarkDark } from 'react-syntax-highlighter/dist/cjs/styles/prism'

import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard'
import { IconCheck, IconCopy, IconDownload } from '@/components/ui/icons'
import { Button } from '@/components/ui/button'

interface Props {
  language: string
  value: string
}

interface languageMap {
  [key: string]: string | undefined
}

export const programmingLanguages: languageMap = {
  javascript: '.js',
  python: '.py',
  java: '.java',
  c: '.c',
  cpp: '.cpp',
  'c++': '.cpp',
  'c#': '.cs',
  ruby: '.rb',
  php: '.php',
  swift: '.swift',
  'objective-c': '.m',
  kotlin: '.kt',
  typescript: '.ts',
  go: '.go',
  perl: '.pl',
  rust: '.rs',
  scala: '.scala',
  haskell: '.hs',
  lua: '.lua',
  shell: '.sh',
  sql: '.sql',
  html: '.html',
  css: '.css'
  // add more file extensions here, make sure the key is same as language prop in CodeBlock.tsx component
}

export const generateRandomString = (length: number, lowercase = false) => {
  const chars = 'ABCDEFGHJKLMNPQRSTUVWXY3456789' // excluding similar looking characters like Z, 2, I, 1, O, 0
  let result = ''
  for (let i = 0; i < length; i++) {
    result += chars.charAt(Math.floor(Math.random() * chars.length))
  }
  return lowercase ? result.toLowerCase() : result
}

const CodeBlock: FC<Props> = memo(({ language, value }) => {
  const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 })

  const downloadAsFile = () => {
    if (typeof window === 'undefined') {
      return
    }
    const fileExtension = programmingLanguages[language] || '.file'
    const suggestedFileName = `file-${generateRandomString(
      3,
      true
    )}${fileExtension}`
    const fileName = window.prompt('Enter file name', suggestedFileName)

    if (!fileName) {
      // User pressed cancel on prompt.
      return
    }

    // Trigger a client-side download through a temporary object URL.
    const blob = new Blob([value], { type: 'text/plain' })
    const url = URL.createObjectURL(blob)
    const link = document.createElement('a')
    link.download = fileName
    link.href = url
    link.style.display = 'none'
    document.body.appendChild(link)
    link.click()
    document.body.removeChild(link)
    URL.revokeObjectURL(url)
  }

  const onCopy = () => {
    if (isCopied) return
    copyToClipboard(value)
  }

  return (
    <div className="codeblock relative w-full bg-zinc-950 font-sans">
      <div className="flex w-full items-center justify-between bg-zinc-800 px-6 py-2 pr-4 text-zinc-100">
        <span className="text-xs lowercase">{language}</span>
        <div className="flex items-center space-x-1">
          <Button
            variant="ghost"
            className="hover:bg-zinc-800 focus-visible:ring-1 focus-visible:ring-slate-700 focus-visible:ring-offset-0"
            onClick={downloadAsFile}
            size="icon"
          >
            <IconDownload />
            <span className="sr-only">Download</span>
          </Button>
          <Button
            variant="ghost"
            size="icon"
            className="text-xs hover:bg-zinc-800 focus-visible:ring-1 focus-visible:ring-slate-700 focus-visible:ring-offset-0"
            onClick={onCopy}
          >
            {isCopied ? <IconCheck /> : <IconCopy />}
            <span className="sr-only">Copy code</span>
          </Button>
        </div>
      </div>
      <SyntaxHighlighter
        language={language}
        style={coldarkDark}
        PreTag="div"
        showLineNumbers
        customStyle={{
          margin: 0,
          width: '100%',
          background: 'transparent',
          padding: '1.5rem 1rem'
        }}
        codeTagProps={{
          style: {
            fontSize: '0.9rem',
            fontFamily: 'var(--font-mono)'
          }
        }}
      >
        {value}
      </SyntaxHighlighter>
    </div>
  )
})
CodeBlock.displayName = 'CodeBlock'

export { CodeBlock }
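For orientation, here is a minimal usage sketch for the component above. The surrounding `Demo` page is a hypothetical example and not part of this space; only the import path and the `language`/`value` props come from the file itself.

// Hypothetical usage example (not from the original space): render a Python
// snippet with the syntax highlighting plus the copy/download toolbar that
// CodeBlock provides.
import { CodeBlock } from '@/components/ui/codeblock'

export default function Demo() {
  return <CodeBlock language="python" value={'print("hello, world")'} />
}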
spaces/2023Liu2023/bingo/src/pages/api/create.ts
DELETED
@@ -1,31 +0,0 @@
'use server'

import { NextApiRequest, NextApiResponse } from 'next'
import { fetch, debug } from '@/lib/isomorphic'
import { createHeaders } from '@/lib/utils'

// const API_ENDPOINT = 'https://www.bing.com/turing/conversation/create'
const API_ENDPOINT = 'https://edgeservices.bing.com/edgesvc/turing/conversation/create'

// Proxies Bing's conversation-create endpoint, forwarding headers derived
// from the caller's cookies and relaying the upstream response body verbatim.
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
  try {
    const headers = createHeaders(req.cookies)

    res.writeHead(200, {
      'Content-Type': 'application/json',
    })

    debug('headers', headers)
    const response = await fetch(API_ENDPOINT, { method: 'GET', headers })
      .then((res) => res.text())

    res.end(response)
  } catch (e) {
    return res.end(JSON.stringify({
      result: {
        value: 'UnauthorizedRequest',
        message: `${e}`
      }
    }))
  }
}
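A hedged sketch of how a browser client might call this route; the helper name is invented for illustration, and the shape of the response body depends entirely on what the upstream Bing endpoint returns.

// Hypothetical client-side caller (not part of the original file). The route
// relays the upstream body as-is, so the caller just parses whatever JSON
// comes back.
async function createConversation(): Promise<unknown> {
  const res = await fetch('/api/create')
  return res.json()
}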
spaces/AIConsultant/MusicGen/audiocraft/adversarial/discriminators/__init__.py
DELETED
@@ -1,10 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

# flake8: noqa
from .mpd import MultiPeriodDiscriminator
from .msd import MultiScaleDiscriminator
from .msstftd import MultiScaleSTFTDiscriminator
spaces/AIDHD/audio-video-transcriber/app.py
DELETED
@@ -1,388 +0,0 @@
from __future__ import unicode_literals
import youtube_dl
import yt_dlp
from pydub import AudioSegment
from pyannote.audio import Pipeline
import re
import whisper
import os
import ffmpeg
import subprocess
import gradio as gr
import traceback
import json

# Speaker-diarization pipeline, loaded once at module import time.
pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token="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")

from pydub.effects import speedup
import moviepy.editor as mp
import datetime
import torch
import pyannote.audio
from pyannote.audio.pipelines.speaker_verification import SpeechBrainPretrainedSpeakerEmbedding  # PyannoteAudioPretrainedSpeakerEmbedding
from pyannote.audio import Audio
from pyannote.core import Segment
import wave
import contextlib
from sklearn.cluster import AgglomerativeClustering
import numpy as np
from datetime import timedelta

from transformers import T5ForConditionalGeneration, T5Tokenizer

__FILES = set()
wispher_models = list(whisper._MODELS.keys())
def correct_grammar(input_text, num_return_sequences=1):
    # Run the input through a T5 grammar-correction model, then re-join the
    # generated sentences with consistent spacing around the periods.
    torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    tokenizer = T5Tokenizer.from_pretrained('deep-learning-analytics/GrammarCorrector')
    model = T5ForConditionalGeneration.from_pretrained('deep-learning-analytics/GrammarCorrector').to(torch_device)
    batch = tokenizer([input_text], truncation=True, padding='max_length', max_length=len(input_text), return_tensors="pt").to(torch_device)
    results = model.generate(**batch, max_length=len(input_text), num_beams=2, num_return_sequences=num_return_sequences, temperature=1.5)
    generated_sequences = []
    for generated_sequence_idx, generated_sequence in enumerate(results):
        text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True, skip_special_tokens=True)
        generated_sequences.append(text)
    generated_text = "".join(generated_sequences)
    _generated_text = ""
    for idx, _sentence in enumerate(generated_text.split('.'), 0):
        if not idx:
            _generated_text += _sentence + '.'
        elif _sentence[:1] != ' ':
            _generated_text += ' ' + _sentence + '.'
        elif _sentence[:1] == '':
            pass
        else:
            _generated_text += _sentence + '.'
    return _generated_text
def CreateFile(filename):
    __FILES.add(filename)
    return filename


def RemoveFile(filename):
    if os.path.isfile(filename):
        os.remove(filename)


def RemoveAllFiles():
    for file in __FILES:
        if os.path.isfile(file):
            os.remove(file)
def Transcribe_V1(NumberOfSpeakers, SpeakerNames="", audio="temp_audio.wav"):
    SPEAKER_DICT = {}
    SPEAKERS = [speaker.strip() for speaker in SpeakerNames.split(',') if len(speaker)]

    def GetSpeaker(sp):
        # Map pyannote's SPEAKER_XX labels to user-supplied names, first come first served.
        speaker = sp
        if sp not in list(SPEAKER_DICT.keys()):
            if len(SPEAKERS):
                t = SPEAKERS.pop(0)
                SPEAKER_DICT[sp] = t
                speaker = SPEAKER_DICT[sp]
        else:
            speaker = SPEAKER_DICT[sp]
        return speaker

    def millisec(timeStr):
        # Convert an "HH:MM:SS.mmm" timestamp to integer milliseconds.
        spl = timeStr.split(":")
        s = int((int(spl[0]) * 60 * 60 + int(spl[1]) * 60 + float(spl[2])) * 1000)
        return s

    def preprocess(audio):
        # Keep at most the first 20 minutes and prepend 2 s of silence as a spacer.
        t1 = 0 * 1000
        t2 = 20 * 60 * 1000
        newAudio = AudioSegment.from_wav(audio)
        a = newAudio[t1:t2]
        spacermilli = 2000
        spacer = AudioSegment.silent(duration=spacermilli)
        newAudio = spacer.append(a, crossfade=0)
        newAudio.export(audio, format="wav")
        return spacermilli, spacer

    def diarization(audio):
        # Run speaker diarization, then re-cut the audio so the diarized
        # segments are separated by silence spacers before transcription.
        as_audio = AudioSegment.from_wav(audio)
        DEMO_FILE = {'uri': 'blabal', 'audio': audio}
        if NumberOfSpeakers:
            dz = pipeline(DEMO_FILE, num_speakers=NumberOfSpeakers)
        else:
            dz = pipeline(DEMO_FILE)
        with open(CreateFile(f"diarization_{audio}.txt"), "w") as text_file:
            text_file.write(str(dz))
        dz = open(CreateFile(f"diarization_{audio}.txt")).read().splitlines()
        dzList = []
        for l in dz:
            start, end = tuple(re.findall(r'[0-9]+:[0-9]+:[0-9]+\.[0-9]+', string=l))
            start = millisec(start)
            end = millisec(end)
            lex = GetSpeaker(re.findall(r'(SPEAKER_[0-9][0-9])', string=l)[0])
            dzList.append([start, end, lex])
        sounds = spacer
        segments = []
        dz = open(CreateFile(f"diarization_{audio}.txt")).read().splitlines()
        for l in dz:
            start, end = tuple(re.findall(r'[0-9]+:[0-9]+:[0-9]+\.[0-9]+', string=l))
            start = millisec(start)
            end = millisec(end)
            segments.append(len(sounds))
            sounds = sounds.append(as_audio[start:end], crossfade=0)
            sounds = sounds.append(spacer, crossfade=0)
        sounds.export(CreateFile(f"dz_{audio}.wav"), format="wav")
        return f"dz_{audio}.wav", dzList, segments

    def transcribe(dz_audio):
        # Transcribe the re-cut audio and align Whisper's captions back to the
        # diarization segments to build a speaker-labelled conversation.
        model = whisper.load_model("medium")
        result = model.transcribe(dz_audio)
        captions = [[caption["start"] * 1000, caption["end"] * 1000, caption["text"]] for caption in result['segments']]
        conversation = []
        for i in range(len(segments)):
            idx = 0
            for idx in range(len(captions)):
                if captions[idx][0] >= (segments[i] - spacermilli):
                    break

            while (idx < len(captions)) and ((i == len(segments) - 1) or (captions[idx][1] < segments[i + 1])):
                c = captions[idx]
                start = dzList[i][0] + (c[0] - segments[i])
                if start < 0:
                    start = 0
                idx += 1
                if not len(conversation):
                    conversation.append([dzList[i][2], c[2]])
                elif conversation[-1][0] == dzList[i][2]:
                    conversation[-1][1] += c[2]
                else:
                    conversation.append([dzList[i][2], c[2]])
        return conversation, ("".join([f"{speaker} --> {text}\n" for speaker, text in conversation]))

    spacermilli, spacer = preprocess(audio)
    dz_audio, dzList, segments = diarization(audio)
    conversation, t_text = transcribe(dz_audio)
    RemoveAllFiles()
    return (t_text, ({"data": [{"speaker": speaker, "text": text} for speaker, text in conversation]}))
def Transcribe_V2(model, num_speakers, speaker_names, audio="temp_audio.wav"):
    model = whisper.load_model(model)
    embedding_model = SpeechBrainPretrainedSpeakerEmbedding(
        "speechbrain/spkrec-ecapa-voxceleb",
        device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    )
    SPEAKER_DICT = {}
    default_speaker_names = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    SPEAKERS = [speaker.strip() for speaker in speaker_names.split(',') if len(speaker)]

    def GetSpeaker(sp):
        # Map cluster labels to user-supplied names, falling back to A, B, C, ...
        speaker = sp
        if sp not in list(SPEAKER_DICT.keys()):
            if len(SPEAKERS):
                t = SPEAKERS.pop(0)
                SPEAKER_DICT[sp] = t
                speaker = SPEAKER_DICT[sp]
            elif len(default_speaker_names):
                t = default_speaker_names.pop(0)
                SPEAKER_DICT[sp] = t
                speaker = SPEAKER_DICT[sp]
        else:
            speaker = SPEAKER_DICT[sp]
        return speaker

    def diarization(audio):
        # Alternative pyannote-based diarization path; defined here but not
        # called in the embedding/clustering flow below.
        def millisec(timeStr):
            spl = timeStr.split(":")
            s = int((int(spl[0]) * 60 * 60 + int(spl[1]) * 60 + float(spl[2])) * 1000)
            return s
        as_audio = AudioSegment.from_wav(audio)
        DEMO_FILE = {'uri': 'blabal', 'audio': audio}
        hparams = pipeline.parameters(instantiated=True)
        hparams["segmentation"]["min_duration_off"] -= 0.25
        pipeline.instantiate(hparams)
        if num_speakers:
            dz = pipeline(DEMO_FILE, num_speakers=num_speakers)
        else:
            dz = pipeline(DEMO_FILE)
        with open(CreateFile(f"diarization_{audio}.txt"), "w") as text_file:
            text_file.write(str(dz))
        dz = open(CreateFile(f"diarization_{audio}.txt")).read().splitlines()
        print(dz)
        dzList = []
        for l in dz:
            start, end = tuple(re.findall(r'[0-9]+:[0-9]+:[0-9]+\.[0-9]+', string=l))
            start = millisec(start)
            end = millisec(end)
            lex = GetSpeaker(re.findall(r'(SPEAKER_[0-9][0-9])', string=l)[0])
            dzList.append([start, end, lex])
        return dzList

    def get_output(segments):
        # Merge consecutive segments from the same speaker, then run each
        # speaker turn through the grammar-correction model.
        conversation = []
        for (i, segment) in enumerate(segments):
            if not len(conversation):
                conversation.append([str(timedelta(seconds=float(segment['start']))), str(timedelta(seconds=float(segment['end']))), GetSpeaker(segment["speaker"]), segment["text"].lstrip()])
            elif conversation[-1][2] == GetSpeaker(segment["speaker"]):
                conversation[-1][3] += segment["text"].lstrip()
            else:
                conversation.append([str(timedelta(seconds=float(segment['start']))), str(timedelta(seconds=float(segment['end']))), GetSpeaker(segment["speaker"]), segment["text"].lstrip()])
        for idx in range(len(conversation)):
            conversation[idx][3] = correct_grammar(conversation[idx][3])
        return ("".join([f"[{start}] - {speaker} \n{text}\n" for start, end, speaker, text in conversation])), ({"data": [{"start": start, "end": end, "speaker": speaker, "text": text} for start, end, speaker, text in conversation]})

    def get_duration(path):
        with contextlib.closing(wave.open(path, 'r')) as f:
            frames = f.getnframes()
            rate = f.getframerate()
            return frames / float(rate)

    def make_embeddings(path, segments, duration):
        # One 192-dimensional speaker embedding per Whisper segment.
        embeddings = np.zeros(shape=(len(segments), 192))
        for i, segment in enumerate(segments):
            embeddings[i] = segment_embedding(path, segment, duration)
        return np.nan_to_num(embeddings)

    def segment_embedding(path, segment, duration):
        start = segment["start"]
        # Whisper overshoots the end timestamp in the last segment
        end = min(duration, segment["end"])
        clip = Segment(start, end)
        waveform, sample_rate = Audio().crop(path, clip)
        return embedding_model(waveform[None])

    def add_speaker_labels(segments, embeddings, num_speakers):
        # Cluster the per-segment embeddings and tag each segment with its cluster.
        clustering = AgglomerativeClustering(num_speakers).fit(embeddings)
        labels = clustering.labels_
        for i in range(len(segments)):
            segments[i]["speaker"] = 'SPEAKER ' + str(labels[i] + 1)

    def time(secs):
        return datetime.timedelta(seconds=round(secs))

    duration = get_duration(audio)
    if duration > 4 * 60 * 60:
        return "Audio duration too long"

    result = model.transcribe(audio)

    segments = result["segments"]

    # Guard against an empty Number input from the UI before rounding.
    num_speakers = min(max(round(num_speakers or 1), 1), len(segments))
    if len(segments) == 1:
        segments[0]['speaker'] = 'SPEAKER 1'
    else:
        embeddings = make_embeddings(audio, segments, duration)
        add_speaker_labels(segments, embeddings, num_speakers)
    return get_output(segments)
def AudioTranscribe(NumberOfSpeakers=None, SpeakerNames="", audio="", retries=5, model='base'):
    print(f"{NumberOfSpeakers}, {SpeakerNames}, {retries}")
    if retries:
        try:
            # Normalize the uploaded file to the wav the pipeline expects.
            subprocess.call(['ffmpeg', '-i', audio, 'temp_audio.wav'])
        except Exception as ex:
            traceback.print_exc()
            return AudioTranscribe(NumberOfSpeakers, SpeakerNames, audio, retries - 1)
        if not os.path.isfile("temp_audio.wav"):
            return AudioTranscribe(NumberOfSpeakers, SpeakerNames, audio, retries - 1)
        return Transcribe_V2(model, NumberOfSpeakers, SpeakerNames)
    else:
        raise gr.Error("There is some issue with the Audio Transcriber. Please try again later!")


def VideoTranscribe(NumberOfSpeakers=None, SpeakerNames="", video="", retries=5, model='base'):
    if retries:
        try:
            clip = mp.VideoFileClip(video)
            clip.audio.write_audiofile("temp_audio.wav")
            # command = f"ffmpeg -i {video} -ab 160k -ac 2 -ar 44100 -vn temp_audio.wav"
            # subprocess.call(command, shell=True)
        except Exception as ex:
            traceback.print_exc()
            return VideoTranscribe(NumberOfSpeakers, SpeakerNames, video, retries - 1)
        if not os.path.isfile("temp_audio.wav"):
            return VideoTranscribe(NumberOfSpeakers, SpeakerNames, video, retries - 1)
        return Transcribe_V2(model, NumberOfSpeakers, SpeakerNames)
    else:
        raise gr.Error("There is some issue with the Video Transcriber. Please try again later!")


def YoutubeTranscribe(NumberOfSpeakers=None, SpeakerNames="", URL="", retries=5, model='base'):
    if retries:
        if "youtu" not in URL.lower():
            raise gr.Error(f"{URL} is not a valid youtube URL.")
        else:
            RemoveFile("temp_audio.wav")
            ydl_opts = {
                'format': 'bestaudio/best',
                'outtmpl': 'temp_audio.%(ext)s',
                'postprocessors': [{
                    'key': 'FFmpegExtractAudio',
                    'preferredcodec': 'wav',
                }],
            }
            try:
                with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                    ydl.download([URL])
            except:
                return YoutubeTranscribe(NumberOfSpeakers, SpeakerNames, URL, retries - 1)
            # The FFmpegExtractAudio post-processor has already produced
            # temp_audio.wav, so no further conversion is needed here.
            return Transcribe_V2(model, NumberOfSpeakers, SpeakerNames)
    else:
        raise gr.Error(f"Unable to get video from {URL}")
with gr.Blocks() as yav_ui:
|
348 |
-
with gr.Row():
|
349 |
-
with gr.Column():
|
350 |
-
with gr.Tab("Youtube", id=1):
|
351 |
-
ysz = gr.Dropdown(label="Model Size", choices=wispher_models , value='base')
|
352 |
-
yinput_nos = gr.Number(label="Number of Speakers", placeholder="2")
|
353 |
-
yinput_sn = gr.Textbox(label="Name of the Speakers (ordered by the time they speak and separated by comma)", placeholder="If Speaker 1 is first to speak followed by Speaker 2 then -> Speaker 1, Speaker 2")
|
354 |
-
yinput = gr.Textbox(label="Youtube Link", placeholder="https://www.youtube.com/watch?v=GECcjrYHH8w")
|
355 |
-
ybutton_transcribe = gr.Button("Transcribe", show_progress=True, scroll_to_output=True)
|
356 |
-
with gr.Tab("Video", id=2):
|
357 |
-
vsz = gr.Dropdown(label="Model Size", choices=wispher_models, value='base')
|
358 |
-
vinput_nos = gr.Number(label="Number of Speakers", placeholder="2")
|
359 |
-
vinput_sn = gr.Textbox(label="Name of the Speakers (ordered by the time they speak and separated by comma)", placeholder="If Speaker 1 is first to speak followed by Speaker 2 then -> Speaker 1, Speaker 2")
|
360 |
-
vinput = gr.Video(label="Video")
|
361 |
-
vbutton_transcribe = gr.Button("Transcribe", show_progress=True, scroll_to_output=True)
|
362 |
-
with gr.Tab("Audio", id=3):
|
363 |
-
asz = gr.Dropdown(label="Model Size", choices=wispher_models , value='base')
|
364 |
-
ainput_nos = gr.Number(label="Number of Speakers", placeholder="2")
|
365 |
-
ainput_sn = gr.Textbox(label="Name of the Speakers (ordered by the time they speak and separated by comma)", placeholder="If Speaker 1 is first to speak followed by Speaker 2 then -> Speaker 1, Speaker 2")
|
366 |
-
ainput = gr.Audio(label="Audio", type="filepath")
|
367 |
-
abutton_transcribe = gr.Button("Transcribe", show_progress=True, scroll_to_output=True)
|
368 |
-
with gr.Column():
|
369 |
-
with gr.Tab("Text"):
|
370 |
-
output_textbox = gr.Textbox(label="Transcribed Text", lines=15)
|
371 |
-
with gr.Tab("JSON"):
|
372 |
-
output_json = gr.JSON(label="Transcribed JSON")
|
373 |
-
ybutton_transcribe.click(
|
374 |
-
fn=YoutubeTranscribe,
|
375 |
-
inputs=[yinput_nos,yinput_sn,yinput, ysz],
|
376 |
-
outputs=[output_textbox,output_json]
|
377 |
-
)
|
378 |
-
abutton_transcribe.click(
|
379 |
-
fn=AudioTranscribe,
|
380 |
-
inputs=[ainput_nos,ainput_sn,ainput, asz],
|
381 |
-
outputs=[output_textbox,output_json]
|
382 |
-
)
|
383 |
-
vbutton_transcribe.click(
|
384 |
-
fn=VideoTranscribe,
|
385 |
-
inputs=[vinput_nos,vinput_sn,vinput, vsz],
|
386 |
-
outputs=[output_textbox,output_json]
|
387 |
-
)
|
388 |
-
yav_ui.launch(debug=True)
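
Because gr.Blocks passes `inputs` to a click handler strictly by position, the model-size dropdown has to line up with the handler's `model` parameter. A minimal, self-contained sketch of that wiring (component and function names here are illustrative, not part of the app above):

import gradio as gr

def transcribe(num_speakers, speaker_names, source, model_size='base'):
    # placeholder handler: just echoes the wiring
    return f"{model_size}: {num_speakers} speakers ({speaker_names}) from {source}"

with gr.Blocks() as demo:
    nos = gr.Number(label="Number of Speakers")
    names = gr.Textbox(label="Speaker Names")
    url = gr.Textbox(label="URL")
    size = gr.Dropdown(choices=['tiny', 'base', 'small'], value='base', label="Model Size")
    out = gr.Textbox(label="Result")
    btn = gr.Button("Transcribe")
    # inputs are positional, so `size` must sit where `model_size` expects it
    btn.click(fn=transcribe, inputs=[nos, names, url, size], outputs=[out])
# demo.launch()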
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/latent_diffusion/util.py
DELETED
@@ -1,295 +0,0 @@
# adopted from
# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
# and
# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
# and
# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
#
# thanks!


import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat

from audioldm.utils import instantiate_from_config


def make_beta_schedule(
    schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3
):
    if schedule == "linear":
        betas = (
            torch.linspace(
                linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64
            )
            ** 2
        )

    elif schedule == "cosine":
        timesteps = (
            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        )
        alphas = timesteps / (1 + cosine_s) * np.pi / 2
        alphas = torch.cos(alphas).pow(2)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        betas = np.clip(betas, a_min=0, a_max=0.999)

    elif schedule == "sqrt_linear":
        betas = torch.linspace(
            linear_start, linear_end, n_timestep, dtype=torch.float64
        )
    elif schedule == "sqrt":
        betas = (
            torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
            ** 0.5
        )
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    return betas.numpy()


def make_ddim_timesteps(
    ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True
):
    if ddim_discr_method == "uniform":
        c = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
    elif ddim_discr_method == "quad":
        ddim_timesteps = (
            (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2
        ).astype(int)
    else:
        raise NotImplementedError(
            f'There is no ddim discretization method called "{ddim_discr_method}"'
        )

    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
    # add one to get the final alpha values right (the ones from first scale to data during sampling)
    steps_out = ddim_timesteps + 1
    if verbose:
        print(f"Selected timesteps for ddim sampler: {steps_out}")
    return steps_out


def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    # select alphas for computing the variance schedule
    alphas = alphacums[ddim_timesteps]
    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())

    # according the the formula provided in https://arxiv.org/abs/2010.02502
    sigmas = eta * np.sqrt(
        (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)
    )
    if verbose:
        print(
            f"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}"
        )
        print(
            f"For the chosen value of eta, which is {eta}, "
            f"this results in the following sigma_t schedule for ddim sampler {sigmas}"
        )
    return sigmas, alphas, alphas_prev


def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].
    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas)


def extract_into_tensor(a, t, x_shape):
    b, *_ = t.shape
    out = a.gather(-1, t).contiguous()
    return out.reshape(b, *((1,) * (len(x_shape) - 1))).contiguous()


def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if flag:
        args = tuple(inputs) + tuple(params)
        return CheckpointFunction.apply(func, len(inputs), *args)
    else:
        return func(*inputs)


class CheckpointFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])

        with torch.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with torch.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = torch.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        return (None, None) + input_grads


def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if not repeat_only:
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period)
            * torch.arange(start=0, end=half, dtype=torch.float32)
            / half
        ).to(device=timesteps.device)
        args = timesteps[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat(
                [embedding, torch.zeros_like(embedding[:, :1])], dim=-1
            )
    else:
        embedding = repeat(timesteps, "b -> b d", d=dim)
    return embedding


def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def scale_module(module, scale):
    """
    Scale the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().mul_(scale)
    return module


def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))


def normalization(channels):
    """
    Make a standard normalization layer.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    return GroupNorm32(32, channels)


# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
    def forward(self, x):
        return x * torch.sigmoid(x)


class GroupNorm32(nn.GroupNorm):
    def forward(self, x):
        return super().forward(x.float()).type(x.dtype)


def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


def linear(*args, **kwargs):
    """
    Create a linear module.
    """
    return nn.Linear(*args, **kwargs)


def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


class HybridConditioner(nn.Module):
    def __init__(self, c_concat_config, c_crossattn_config):
        super().__init__()
        self.concat_conditioner = instantiate_from_config(c_concat_config)
        self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)

    def forward(self, c_concat, c_crossattn):
        c_concat = self.concat_conditioner(c_concat)
        c_crossattn = self.crossattn_conditioner(c_crossattn)
        return {"c_concat": [c_concat], "c_crossattn": [c_crossattn]}


def noise_like(shape, device, repeat=False):
    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(
        shape[0], *((1,) * (len(shape) - 1))
    )
    noise = lambda: torch.randn(shape, device=device)
    return repeat_noise() if repeat else noise()
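
As a rough smoke test of the schedule helpers in this module, the following sketch (illustrative hyperparameters; expected shapes noted in comments) chains them the way a DDIM sampler typically would:

betas = make_beta_schedule("linear", n_timestep=1000)                  # numpy, shape (1000,)
alphas_cumprod = torch.from_numpy(1.0 - betas).cumprod(dim=0)          # running alpha-bar
ddim_steps = make_ddim_timesteps("uniform", 50, 1000, verbose=False)   # 50 timestep indices
sigmas, a_t, a_prev = make_ddim_sampling_parameters(
    alphas_cumprod.numpy(), ddim_steps, eta=0.0, verbose=False         # eta=0 -> deterministic DDIM
)
emb = timestep_embedding(torch.arange(4), dim=128)                     # (4, 128) sinusoidal embeddings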
spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/ps.py
DELETED
@@ -1,194 +0,0 @@
import os
import torch
import torch.nn.functional as F
from torch import nn

from text_to_speech.modules.tts.portaspeech.portaspeech import PortaSpeech
from tasks.tts.fs import FastSpeechTask
from text_to_speech.utils.audio.align import mel2token_to_dur
from text_to_speech.utils.commons.hparams import hparams
from text_to_speech.utils.metrics.diagonal_metrics import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate
from text_to_speech.utils.nn.model_utils import num_params
import numpy as np

from text_to_speech.utils.plot.plot import spec_to_figure
from text_to_speech.utils.text.text_encoder import build_token_encoder


class PortaSpeechTask(FastSpeechTask):
    def __init__(self):
        super().__init__()
        data_dir = hparams['binary_data_dir']
        self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json')

    def build_tts_model(self):
        ph_dict_size = len(self.token_encoder)
        word_dict_size = len(self.word_encoder)
        self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams)

    def on_train_start(self):
        super().on_train_start()
        for n, m in self.model.named_children():
            num_params(m, model_name=n)
        if hasattr(self.model, 'fvae'):
            for n, m in self.model.fvae.named_children():
                num_params(m, model_name=f'fvae.{n}')

    def run_model(self, sample, infer=False, *args, **kwargs):
        txt_tokens = sample['txt_tokens']
        word_tokens = sample['word_tokens']
        spk_embed = sample.get('spk_embed')
        spk_id = sample.get('spk_ids')
        if not infer:
            output = self.model(txt_tokens, word_tokens,
                                ph2word=sample['ph2word'],
                                mel2word=sample['mel2word'],
                                mel2ph=sample['mel2ph'],
                                word_len=sample['word_lengths'].max(),
                                tgt_mels=sample['mels'],
                                pitch=sample.get('pitch'),
                                spk_embed=spk_embed,
                                spk_id=spk_id,
                                infer=False,
                                global_step=self.global_step)
            losses = {}
            losses['kl_v'] = output['kl'].detach()
            losses_kl = output['kl']
            losses_kl = torch.clamp(losses_kl, min=hparams['kl_min'])
            losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl
            losses_kl = losses_kl * hparams['lambda_kl']
            losses['kl'] = losses_kl
            self.add_mel_loss(output['mel_out'], sample['mels'], losses)
            if hparams['dur_level'] == 'word':
                self.add_dur_loss(
                    output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses)
                self.get_attn_stats(output['attn'], sample, losses)
            else:
                super(PortaSpeechTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses)
            return losses, output
        else:
            use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur'])
            output = self.model(
                txt_tokens, word_tokens,
                ph2word=sample['ph2word'],
                word_len=sample['word_lengths'].max(),
                pitch=sample.get('pitch'),
                mel2ph=sample['mel2ph'] if use_gt_dur else None,
                mel2word=sample['mel2word'] if use_gt_dur else None,
                tgt_mels=sample['mels'],
                infer=True,
                spk_embed=spk_embed,
                spk_id=spk_id,
            )
            return output

    def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None):
        T = word_len.max()
        dur_gt = mel2token_to_dur(mel2token, T).float()
        nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float()
        dur_pred = dur_pred * nonpadding
        dur_gt = dur_gt * nonpadding
        wdur = F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none')
        wdur = (wdur * nonpadding).sum() / nonpadding.sum()
        if hparams['lambda_word_dur'] > 0:
            losses['wdur'] = wdur * hparams['lambda_word_dur']
        if hparams['lambda_sent_dur'] > 0:
            sent_dur_p = dur_pred.sum(-1)
            sent_dur_g = dur_gt.sum(-1)
            sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean')
            losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']

    def validation_step(self, sample, batch_idx):
        return super().validation_step(sample, batch_idx)

    def save_valid_result(self, sample, batch_idx, model_out):
        super(PortaSpeechTask, self).save_valid_result(sample, batch_idx, model_out)
        if self.global_step > 0 and hparams['dur_level'] == 'word':
            self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step)

    def get_attn_stats(self, attn, sample, logging_outputs, prefix=''):
        # diagonal_focus_rate
        txt_lengths = sample['txt_lengths'].float()
        mel_lengths = sample['mel_lengths'].float()
        src_padding_mask = sample['txt_tokens'].eq(0)
        target_padding_mask = sample['mels'].abs().sum(-1).eq(0)
        src_seg_mask = sample['txt_tokens'].eq(self.seg_idx)
        attn_ks = txt_lengths.float() / mel_lengths.float()

        focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data
        phone_coverage_rate = get_phone_coverage_rate(
            attn, src_padding_mask, src_seg_mask, target_padding_mask).mean()
        diagonal_focus_rate, diag_mask = get_diagonal_focus_rate(
            attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask)
        logging_outputs[f'{prefix}fr'] = focus_rate.mean().data
        logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data
        logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data

    def get_plot_dur_info(self, sample, model_out):
        if hparams['dur_level'] == 'word':
            T_txt = sample['word_lengths'].max()
            dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0]
            dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
            txt = sample['ph_words'][0].split(" ")
        else:
            T_txt = sample['txt_tokens'].shape[1]
            dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0]
            dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
            txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy())
            txt = txt.split(" ")
        return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt}

    def build_optimizer(self, model):
        self.optimizer = torch.optim.AdamW(
            self.model.parameters(),
            lr=hparams['lr'],
            betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
            weight_decay=hparams['weight_decay'])
        return self.optimizer

    def build_scheduler(self, optimizer):
        return FastSpeechTask.build_scheduler(self, optimizer)

    ############
    # infer
    ############
    def test_start(self):
        super().test_start()
        if hparams.get('save_attn', False):
            os.makedirs(f'{self.gen_dir}/attn', exist_ok=True)
        self.model.store_inverse_all()

    def test_step(self, sample, batch_idx):
        assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference'
        outputs = self.run_model(sample, infer=True)
        text = sample['text'][0]
        item_name = sample['item_name'][0]
        tokens = sample['txt_tokens'][0].cpu().numpy()
        mel_gt = sample['mels'][0].cpu().numpy()
        mel_pred = outputs['mel_out'][0].cpu().numpy()
        mel2ph = sample['mel2ph'][0].cpu().numpy()
        mel2ph_pred = None
        str_phs = self.token_encoder.decode(tokens, strip_padding=True)
        base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]'
        if text is not None:
            base_fn += text.replace(":", "$3A")[:80]
        base_fn = base_fn.replace(' ', '_')
        gen_dir = self.gen_dir
        wav_pred = self.vocoder.spec2wav(mel_pred)
        self.saving_result_pool.add_job(self.save_result, args=[
            wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred])
        if hparams['save_gt']:
            wav_gt = self.vocoder.spec2wav(mel_gt)
            self.saving_result_pool.add_job(self.save_result, args=[
                wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph])
        if hparams.get('save_attn', False):
            attn = outputs['attn'][0].cpu().numpy()
            np.save(f'{gen_dir}/attn/{item_name}.npy', attn)
        print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
        return {
            'item_name': item_name,
            'text': text,
            'ph_tokens': self.token_encoder.decode(tokens.tolist()),
            'wav_fn_pred': base_fn % 'P',
            'wav_fn_gt': base_fn % 'G',
        }
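
The KL handling in run_model clamps the raw KL from below, ramps it in linearly over `kl_start_steps`, then scales it by `lambda_kl`. A standalone sketch of that warm-up schedule (the hyperparameter values here are illustrative defaults, not the project's):

import torch

def warmed_kl(kl, global_step, kl_min=0.0, kl_start_steps=10000, lambda_kl=1.0):
    # clamp, linear warm-up ramp, then fixed weighting -- mirrors run_model above
    kl = torch.clamp(kl, min=kl_min)
    ramp = min(global_step / kl_start_steps, 1)
    return ramp * lambda_kl * kl

print(warmed_kl(torch.tensor(0.5), global_step=2500))  # tensor(0.1250)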
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/utils.py
DELETED
@@ -1,369 +0,0 @@
import numpy as np
import torch
from torch import nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d
import logging
import h5py
from tqdm import tqdm
import random
import json
import os
import pathlib

# TODO: (yusong) this not a good place to store those information and does not scale. Need to be fixed later.
dataset_split = {
    "audiocaps": ["train", "valid", "test"],
    "audioset": ["balanced_train", "unbalanced_train", "eval"],
    "BBCSoundEffects": ["train", "test"],
    "Clotho": ["train", "test", "valid"],
    "free_to_use_sounds": ["train", "test"],
    "paramount_motion": ["train", "test"],
    "sonniss_game_effects": ["train", "test"],
    "wesoundeffects": ["train", "test"],
    "MACS": ["train", "test"],
    "freesound": ["train", "test"],
    "FSD50K": ["train", "test", "valid"],
    "fsd50k_class_label": ["train", "test", "valid"],
    "esc50": ["train", "test"],
    "audiostock": ["train", "test"],
    "freesound_no_overlap_noesc50": ["train", "test"],
    "epidemic_sound_effects": ["train", "test"],
    "VGGSound": ["train", "test"],
    "urbansound8k_class_label": ["train", "test"],
    "audioset_t5": ["balanced_train", "unbalanced_train", "eval"],
    "epidemic_sound_effects_t5": ["train", "test"],
    "WavText5K": ["train", "test"],
    "esc50_no_overlap": ["train", "test"],
    "usd8k_no_overlap": ["train", "test"],
    "fsd50k_200_class_label": ["train", "test", "valid"]
}


def freeze_batch_norm_2d(module, module_match={}, name=""):
    """
    Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
    itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
    returned. Otherwise, the module is walked recursively and submodules are converted in place.

    Args:
        module (torch.nn.Module): Any PyTorch module.
        module_match (dict): Dictionary of full module names to freeze (all if empty)
        name (str): Full module name (prefix)

    Returns:
        torch.nn.Module: Resulting module

    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
    """
    res = module
    is_match = True
    if module_match:
        is_match = name in module_match
    if is_match and isinstance(
        module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)
    ):
        res = FrozenBatchNorm2d(module.num_features)
        res.num_features = module.num_features
        res.affine = module.affine
        if module.affine:
            res.weight.data = module.weight.data.clone().detach()
            res.bias.data = module.bias.data.clone().detach()
        res.running_mean.data = module.running_mean.data
        res.running_var.data = module.running_var.data
        res.eps = module.eps
    else:
        for child_name, child in module.named_children():
            full_child_name = ".".join([name, child_name]) if name else child_name
            new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
            if new_child is not child:
                res.add_module(child_name, new_child)
    return res


def exist(dataset_name, dataset_type):
    """
    Check if dataset exists
    """
    if dataset_type in dataset_split[dataset_name]:
        return True
    else:
        return False


def get_tar_path_from_dataset_name(
    dataset_names,
    dataset_types,
    islocal,
    dataset_path,
    proportion=1,
    full_dataset=None
):
    """
    Get tar path from dataset name and type
    """
    output = []
    for n in dataset_names:
        if full_dataset is not None and n in full_dataset:
            current_dataset_types = dataset_split[n]
        else:
            current_dataset_types = dataset_types
        for s in current_dataset_types:
            tmp = []
            if islocal:
                sizefilepath_ = f"{dataset_path}/{n}/{s}/sizes.json"
                if not os.path.exists(sizefilepath_):
                    sizefilepath_ = f"./json_files/{n}/{s}/sizes.json"
            else:
                sizefilepath_ = f"./json_files/{n}/{s}/sizes.json"
            if not os.path.exists(sizefilepath_):
                continue
            sizes = json.load(open(sizefilepath_, "r"))
            for k in sizes.keys():
                if islocal:
                    tmp.append(f"{dataset_path}/{n}/{s}/{k}")
                else:
                    tmp.append(
                        f"pipe:aws s3 --cli-connect-timeout 0 cp s3://s-laion-audio/webdataset_tar/{n}/{s}/{k} -"
                    )
            if proportion != 1:
                tmp = random.sample(tmp, int(proportion * len(tmp)))
            output.append(tmp)
    return sum(output, [])


def get_tar_path_from_txts(txt_path, islocal, proportion=1):
    """
    Get tar path from txt path
    """
    if isinstance(txt_path, (list, tuple)):
        return sum(
            [
                get_tar_path_from_txts(
                    txt_path[i], islocal=islocal, proportion=proportion
                )
                for i in range(len(txt_path))
            ],
            [],
        )
    if isinstance(txt_path, str):
        with open(txt_path) as f:
            lines = f.readlines()
        if islocal:
            lines = [
                lines[i]
                .split("\n")[0]
                .replace("pipe:aws s3 cp s3://s-laion-audio/", "/mnt/audio_clip/")
                for i in range(len(lines))
            ]
        else:
            lines = [
                lines[i].split("\n")[0].replace(".tar", ".tar -")
                for i in range(len(lines))
            ]
        if proportion != 1:
            print("Sampling tars with proportion of {}".format(proportion))
            lines = random.sample(lines, int(proportion * len(lines)))
        return lines


def get_mix_lambda(mixup_alpha, batch_size):
    mixup_lambdas = [
        np.random.beta(mixup_alpha, mixup_alpha, 1)[0] for _ in range(batch_size)
    ]
    return np.array(mixup_lambdas).astype(np.float32)


def do_mixup(x, mixup_lambda):
    """
    Args:
        x: (batch_size , ...)
        mixup_lambda: (batch_size,)
    Returns:
        out: (batch_size, ...)
    """
    out = (
        x.transpose(0, -1) * mixup_lambda
        + torch.flip(x, dims=[0]).transpose(0, -1) * (1 - mixup_lambda)
    ).transpose(0, -1)
    return out


def interpolate(x, ratio):
    """Interpolate data in time domain. This is used to compensate the
    resolution reduction in downsampling of a CNN.

    Args:
        x: (batch_size, time_steps, classes_num)
        ratio: int, ratio to interpolate
    Returns:
        upsampled: (batch_size, time_steps * ratio, classes_num)
    """
    (batch_size, time_steps, classes_num) = x.shape
    upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
    upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
    return upsampled


def pad_framewise_output(framewise_output, frames_num):
    """Pad framewise_output to the same length as input frames. The pad value
    is the same as the value of the last frame.
    Args:
        framewise_output: (batch_size, frames_num, classes_num)
        frames_num: int, number of frames to pad
    Outputs:
        output: (batch_size, frames_num, classes_num)
    """
    pad = framewise_output[:, -1:, :].repeat(
        1, frames_num - framewise_output.shape[1], 1
    )
    """tensor for padding"""

    output = torch.cat((framewise_output, pad), dim=1)
    """(batch_size, frames_num, classes_num)"""
    # the original omitted this return, so the function yielded None
    return output


def process_ipc(index_path, classes_num, filename):
    # load data
    logging.info("Load Data...............")
    ipc = [[] for _ in range(classes_num)]
    with h5py.File(index_path, "r") as f:
        for i in tqdm(range(len(f["target"]))):
            t_class = np.where(f["target"][i])[0]
            for t in t_class:
                ipc[t].append(i)
    print(ipc)
    np.save(filename, ipc)
    logging.info("Load Data Succeed...............")


def save_to_dict(s, o_={}):
    sp = s.split(": ")
    o_.update({sp[0]: float(sp[1])})
    return o_


def get_data_from_log(txt_path):
    """
    Output dictionary from out.txt log file
    """
    with open(txt_path) as f:
        lines = f.readlines()
    val_data = {}
    train_data = {}
    train_losses = []
    train_losses_epoch = []
    for i in range(len(lines)):
        if "| INFO |" in lines[i]:
            if "Eval Epoch" in lines[i]:
                if "val_loss" in lines[i]:
                    # float(regex.sub("", lines[310].split(" ")[-1]).replace(" ", ""))
                    line = lines[i].split("Eval Epoch: ")[-1]
                    num_epoch = int(line.split(" ")[0].split(" ")[0])
                    d = {
                        line.split(" ")[0]
                        .split(" ")[1]
                        .replace(":", ""): float(line.split(" ")[0].split(" ")[-1])
                    }
                    for i in range(1, len(line.split(" "))):
                        d = save_to_dict(line.split(" ")[i], d)
                    val_data[num_epoch] = d
            elif "Train Epoch" in lines[i]:
                num_epoch = int(lines[i].split("Train Epoch: ")[1][0])
                loss = float(lines[i].split("Loss: ")[-1].split(" (")[0])
                train_losses.append(loss)
                train_losses_epoch.append(num_epoch)
    for i in range(len(train_losses)):
        train_data[i] = {
            "num_epoch": train_losses_epoch[i],
            "train_loss": train_losses[i],
        }
    return train_data, val_data


def save_p(obj, filename):
    import pickle

    try:
        from deepdiff import DeepDiff
    except ImportError:
        os.system("pip install deepdiff")
        from deepdiff import DeepDiff
    with open(filename, "wb") as file:
        pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)  # highest protocol
    with open(filename, "rb") as file:
        z = pickle.load(file)
    assert (
        DeepDiff(obj, z, ignore_string_case=True) == {}
    ), "there is something wrong with the saving process"
    return


def load_p(filename):
    import pickle

    with open(filename, "rb") as file:
        z = pickle.load(file)
    return z


def save_json(data, name="data.json"):
    import json
    with open(name, 'w') as fp:
        json.dump(data, fp)
    return


def load_json(name):
    import json
    with open(name, 'r') as fp:
        data = json.load(fp)
    return data


from multiprocessing import Process, Manager
from multiprocessing import Process, Value, Array
from ctypes import c_wchar


def load_class_label(path):
    # https://stackoverflow.com/questions/48004243/how-to-share-large-read-only-dictionary-list-across-processes-in-multiprocessing
    # https://stackoverflow.com/questions/45693949/storing-strings-in-a-multiprocessing-sharedctypes-array
    out = None
    if path is not None:
        if pathlib.Path(path).suffix in [".pkl", ".pickle"]:
            out = load_p(path)
        elif pathlib.Path(path).suffix in [".json", ".txt"]:
            out = load_json(path)
        elif pathlib.Path(path).suffix in [".npy", ".npz"]:
            out = np.load(path)
        elif pathlib.Path(path).suffix in [".csv"]:
            import pandas as pd
            out = pd.read_csv(path)
    return out
    # if out is None:
    #     return None
    # else:
    #     key = Array(c_wchar, '\n'.join(list(out.keys())), lock=False)
    #     val = Array('i', out.values(), lock=False)
    #     return (key, val)


from torch import optim


def get_optimizer(params, lr, betas, eps, momentum, optimizer_name):
    if optimizer_name.lower() == "adamw":
        optimizer = optim.AdamW(
            params, lr=lr, betas=betas, eps=eps
        )
    elif optimizer_name.lower() == "sgd":
        optimizer = optim.SGD(
            params, lr=lr, momentum=momentum
        )
    elif optimizer_name.lower() == "adam":
        optimizer = optim.Adam(
            params, lr=lr, betas=betas, eps=eps
        )
    else:
        raise ValueError("optimizer name is not correct")
    return optimizer
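
do_mixup blends each example with the batch in reverse order, weighted per-example by the Beta-distributed coefficients from get_mix_lambda. A quick shape-level check (random illustrative values):

import torch

lam = torch.from_numpy(get_mix_lambda(0.5, batch_size=4))   # (4,) weights in [0, 1]
x = torch.randn(4, 16000)                                   # e.g. a batch of raw waveforms
mixed = do_mixup(x, lam)
# row i is lam[i] * x[i] + (1 - lam[i]) * x[batch_size - 1 - i]
assert mixed.shape == x.shape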
spaces/AIatUIUC/CodeLATS/executors/__init__.py
DELETED
@@ -1,2 +0,0 @@
from .py_executor import PyExecutor
from .factory import executor_factory
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/input/Input.js
DELETED
@@ -1,64 +0,0 @@
const GetValue = Phaser.Utils.Objects.GetValue;
class Input {
    constructor(bejeweled, config) {
        this.bejeweled = bejeweled;      // Bejeweled
        this.scene = bejeweled.scene;    // Bejeweled.scene

        this.setEnable(GetValue(config, 'input.enable', true));
        this.boot();
    }

    boot() {
        // Touch control
        this.scene.input
            .on('pointerdown', this.selectChess1, this)
            .on('pointermove', this.selectChess2, this);
    }

    shutdown() {
        this.scene.input
            .off('pointerdown', this.selectChess1, this)
            .off('pointermove', this.selectChess2, this);
        this.bejeweled = undefined;
        this.scene = undefined;
    }

    destroy() {
        this.shutdown();
        return this;
    }

    setEnable(enabled) {
        if (enabled === undefined) {
            enabled = true;
        }
        this.enable = enabled;
        return this;
    }

    selectChess1(pointer) {
        if (!this.enable) {
            return this;
        }
        var chess = this.bejeweled.worldXYToChess(pointer.worldX, pointer.worldY);
        if (chess) {
            this.bejeweled.selectChess1(chess);
        }
    }

    selectChess2(pointer) {
        if (!this.enable) {
            return this;
        }

        if (!pointer.isDown) {
            return;
        }
        var chess = this.bejeweled.worldXYToChess(pointer.worldX, pointer.worldY);
        if (chess && (chess !== this.bejeweled.getSelectedChess1())) {
            this.bejeweled.selectChess2(chess);
        }
    }
}

export default Input;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/methods/listpanel/CreateListPanel.js
DELETED
@@ -1,64 +0,0 @@
import Buttons from '../../../buttons/Buttons.js';
import FixWidthButtons from '../../../fixwidthbuttons/FixWidthButtons.js';

var CreateListPanel = function () {
    var scene = this.scene;

    var background;
    var createBackgroundCallback = this.listCreateBackgroundCallback;
    if (createBackgroundCallback) {
        background = createBackgroundCallback.call(this, scene);
        scene.add.existing(background);
    }

    var buttons = [];
    var createButtonCallback = this.listCreateButtonCallback;
    if (createButtonCallback) {
        var options = this.options;
        for (var i = 0, cnt = options.length; i < cnt; i++) {
            var button = createButtonCallback.call(this, scene, options[i], i, options);
            scene.add.existing(button);
            buttons.push(button);
        }
    }

    var width = this.listWidth;
    if (width === undefined) {
        if (this.listAlignMode === 'text') {
            width = this.getElement('text').width;
        } else {
            width = this.width;
        }
    }
    var height = this.listHeight;

    var listPanel;
    if (!this.listWrapEnable) {
        listPanel = new Buttons(scene, {
            width: width, height: height,

            orientation: 'y',
            background: background,
            buttons: buttons,

            space: this.listSpace,
            draggable: this.listDraggable,
        });
    } else {
        listPanel = new FixWidthButtons(scene, {
            width: width, height: height,

            background: background,
            buttons: buttons,

            space: this.listSpace,
            draggable: this.listDraggable,
        });
    }

    scene.add.existing(listPanel);

    return listPanel;
}

export default CreateListPanel;
spaces/Alpaca233/SadTalker/src/face3d/options/base_options.py
DELETED
@@ -1,169 +0,0 @@
"""This script contains base options for Deep3DFaceRecon_pytorch
"""

import argparse
import os
from util import util
import numpy as np
import torch
import face3d.models as models
import face3d.data as data


class BaseOptions():
    """This class defines options used during both training and test time.

    It also implements several helper functions such as parsing, printing, and saving the options.
    It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
    """

    def __init__(self, cmd_line=None):
        """Reset the class; indicates the class hasn't been initailized"""
        self.initialized = False
        self.cmd_line = None
        if cmd_line is not None:
            self.cmd_line = cmd_line.split()

    def initialize(self, parser):
        """Define the common options that are used in both training and test."""
        # basic parameters
        parser.add_argument('--name', type=str, default='face_recon', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--vis_batch_nums', type=float, default=1, help='batch nums of images for visulization')
        parser.add_argument('--eval_batch_nums', type=float, default=float('inf'), help='batch nums of images for evaluation')
        parser.add_argument('--use_ddp', type=util.str2bool, nargs='?', const=True, default=True, help='whether use distributed data parallel')
        parser.add_argument('--ddp_port', type=str, default='12355', help='ddp port')
        parser.add_argument('--display_per_batch', type=util.str2bool, nargs='?', const=True, default=True, help='whether use batch to show losses')
        parser.add_argument('--add_image', type=util.str2bool, nargs='?', const=True, default=True, help='whether add image to tensorboard')
        parser.add_argument('--world_size', type=int, default=1, help='batch nums of images for evaluation')

        # model parameters
        parser.add_argument('--model', type=str, default='facerecon', help='chooses which model to use.')

        # additional parameters
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')

        self.initialized = True
        return parser

    def gather_options(self):
        """Initialize our parser with basic options(only once).
        Add additional model-specific and dataset-specific options.
        These options are defined in the <modify_commandline_options> function
        in model and dataset classes.
        """
        if not self.initialized:  # check if it has been initialized
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)

        # get the basic options
        if self.cmd_line is None:
            opt, _ = parser.parse_known_args()
        else:
            opt, _ = parser.parse_known_args(self.cmd_line)

        # set cuda visible devices
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_ids

        # modify model-related parser options
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        if self.cmd_line is None:
            opt, _ = parser.parse_known_args()  # parse again with new defaults
        else:
            opt, _ = parser.parse_known_args(self.cmd_line)  # parse again with new defaults

        # modify dataset-related parser options
        if opt.dataset_mode:
            dataset_name = opt.dataset_mode
            dataset_option_setter = data.get_option_setter(dataset_name)
            parser = dataset_option_setter(parser, self.isTrain)

        # save and return the parser
        self.parser = parser
        if self.cmd_line is None:
            return parser.parse_args()
        else:
            return parser.parse_args(self.cmd_line)

    def print_options(self, opt):
        """Print and save options

        It will print both current options and default values(if different).
        It will save options into a text file / [checkpoints_dir] / opt.txt
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)

        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
        try:
            with open(file_name, 'wt') as opt_file:
                opt_file.write(message)
                opt_file.write('\n')
        except PermissionError as error:
            print("permission error {}".format(error))
            pass

    def parse(self):
        """Parse our options, create checkpoints directory suffix, and set up gpu device."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain  # train or test

        # process opt.suffix
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix

        # set gpu ids
        str_ids = opt.gpu_ids.split(',')
        gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                gpu_ids.append(id)
        opt.world_size = len(gpu_ids)
        # if len(opt.gpu_ids) > 0:
        #     torch.cuda.set_device(gpu_ids[0])
        if opt.world_size == 1:
            opt.use_ddp = False

        if opt.phase != 'test':
            # set continue_train automatically
            if opt.pretrained_name is None:
                model_dir = os.path.join(opt.checkpoints_dir, opt.name)
            else:
                model_dir = os.path.join(opt.checkpoints_dir, opt.pretrained_name)
            if os.path.isdir(model_dir):
                model_pths = [i for i in os.listdir(model_dir) if i.endswith('pth')]
                if os.path.isdir(model_dir) and len(model_pths) != 0:
                    opt.continue_train = True

            # update the latest epoch count
            if opt.continue_train:
                if opt.epoch == 'latest':
                    epoch_counts = [int(i.split('.')[0].split('_')[-1]) for i in model_pths if 'latest' not in i]
                    if len(epoch_counts) != 0:
                        opt.epoch_count = max(epoch_counts) + 1
                else:
                    opt.epoch_count = int(opt.epoch) + 1

        self.print_options(opt)
        self.opt = opt
        return self.opt
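
The --suffix option above is templated against the parsed options themselves via `str.format(**vars(opt))`. A standalone illustration of just that mechanism (toy parser, not the full options class):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--name', default='face_recon')
parser.add_argument('--model', default='facerecon')
parser.add_argument('--suffix', default='')
opt = parser.parse_args(['--suffix', '{model}'])
# every parsed option is available as a template field
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
print(opt.name + suffix)   # face_recon_facerecon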
spaces/Altinas/vits-uma-genshin-honkais/README.md
DELETED
@@ -1,11 +0,0 @@
---
license: apache-2.0
title: ' vits-uma-genshin-honkai'
sdk: gradio
sdk_version: 3.7
emoji: 🐨
colorTo: yellow
pinned: false
app_file: app.py
duplicated_from: Altinas/vits-uma-genshin-honkai
---
spaces/Amrrs/DragGan-Inversion/PTI/utils/__init__.py
DELETED
File without changes
spaces/Amrrs/DragGan-Inversion/viz/renderer.py
DELETED
@@ -1,442 +0,0 @@
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

from socket import has_dualstack_ipv6
import sys
import copy
import traceback
import math
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import torch
import torch.fft
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.cm
import dnnlib
from torch_utils.ops import upfirdn2d
import legacy  # pylint: disable=import-error

# ----------------------------------------------------------------------------


class CapturedException(Exception):
    def __init__(self, msg=None):
        if msg is None:
            _type, value, _traceback = sys.exc_info()
            assert value is not None
            if isinstance(value, CapturedException):
                msg = str(value)
            else:
                msg = traceback.format_exc()
        assert isinstance(msg, str)
        super().__init__(msg)

# ----------------------------------------------------------------------------


class CaptureSuccess(Exception):
    def __init__(self, out):
        super().__init__()
        self.out = out

# ----------------------------------------------------------------------------


def add_watermark_np(input_image_array, watermark_text="AI Generated"):
    image = Image.fromarray(np.uint8(input_image_array)).convert("RGBA")

    # Initialize text image
    txt = Image.new('RGBA', image.size, (255, 255, 255, 0))
    font = ImageFont.truetype('arial.ttf', round(25/512*image.size[0]))
    d = ImageDraw.Draw(txt)

    text_width, text_height = font.getsize(watermark_text)
    text_position = (image.size[0] - text_width -
                     10, image.size[1] - text_height - 10)
    # white color with the alpha channel set to semi-transparent
    text_color = (255, 255, 255, 128)

    # Draw the text onto the text canvas
    d.text(text_position, watermark_text, font=font, fill=text_color)

    # Combine the image with the watermark
    watermarked = Image.alpha_composite(image, txt)
    watermarked_array = np.array(watermarked)
    return watermarked_array

# ----------------------------------------------------------------------------


class Renderer:
    def __init__(self, disable_timing=False):
        self._device = torch.device('cuda' if torch.cuda.is_available(
        ) else 'mps' if torch.backends.mps.is_available() else 'cpu')
        self._dtype = torch.float32 if self._device.type == 'mps' else torch.float64
        self._pkl_data = dict()     # {pkl: dict | CapturedException, ...}
        self._networks = dict()     # {cache_key: torch.nn.Module, ...}
        self._pinned_bufs = dict()  # {(shape, dtype): torch.Tensor, ...}
        self._cmaps = dict()        # {name: torch.Tensor, ...}
        self._is_timing = False
        if not disable_timing:
            self._start_event = torch.cuda.Event(enable_timing=True)
            self._end_event = torch.cuda.Event(enable_timing=True)
        self._disable_timing = disable_timing
        self._net_layers = dict()   # {cache_key: [dnnlib.EasyDict, ...], ...}

    def render(self, **args):
        if self._disable_timing:
            self._is_timing = False
        else:
            self._start_event.record(torch.cuda.current_stream(self._device))
            self._is_timing = True
        res = dnnlib.EasyDict()
        try:
            init_net = False
            if not hasattr(self, 'G'):
                init_net = True
            if hasattr(self, 'pkl'):
                if self.pkl != args['pkl']:
                    init_net = True
            if hasattr(self, 'w_load'):
                if self.w_load is not args['w_load']:
                    init_net = True
            if hasattr(self, 'w0_seed'):
                if self.w0_seed != args['w0_seed']:
                    init_net = True
            if hasattr(self, 'w_plus'):
                if self.w_plus != args['w_plus']:
                    init_net = True
            if args['reset_w']:
                init_net = True
            res.init_net = init_net
            if init_net:
                self.init_network(res, **args)
            self._render_drag_impl(res, **args)
        except:
            res.error = CapturedException()
        if not self._disable_timing:
            self._end_event.record(torch.cuda.current_stream(self._device))
        if 'image' in res:
            res.image = self.to_cpu(res.image).detach().numpy()
            res.image = add_watermark_np(res.image, 'AI Generated')
        if 'stats' in res:
            res.stats = self.to_cpu(res.stats).detach().numpy()
        if 'error' in res:
            res.error = str(res.error)
        # if 'stop' in res and res.stop:

        if self._is_timing and not self._disable_timing:
            self._end_event.synchronize()
            res.render_time = self._start_event.elapsed_time(
                self._end_event) * 1e-3
            self._is_timing = False
        return res

    def get_network(self, pkl, key, **tweak_kwargs):
        data = self._pkl_data.get(pkl, None)
        if data is None:
            print(f'Loading "{pkl}"... ', end='', flush=True)
145 |
-
try:
|
146 |
-
with dnnlib.util.open_url(pkl, verbose=False) as f:
|
147 |
-
data = legacy.load_network_pkl(f)
|
148 |
-
print('Done.')
|
149 |
-
except:
|
150 |
-
data = CapturedException()
|
151 |
-
print('Failed!')
|
152 |
-
self._pkl_data[pkl] = data
|
153 |
-
self._ignore_timing()
|
154 |
-
if isinstance(data, CapturedException):
|
155 |
-
raise data
|
156 |
-
|
157 |
-
orig_net = data[key]
|
158 |
-
cache_key = (orig_net, self._device, tuple(
|
159 |
-
sorted(tweak_kwargs.items())))
|
160 |
-
net = self._networks.get(cache_key, None)
|
161 |
-
if net is None:
|
162 |
-
try:
|
163 |
-
if 'stylegan2' in pkl:
|
164 |
-
from training.networks_stylegan2 import Generator
|
165 |
-
elif 'stylegan3' in pkl:
|
166 |
-
from training.networks_stylegan3 import Generator
|
167 |
-
elif 'stylegan_human' in pkl:
|
168 |
-
from stylegan_human.training_scripts.sg2.training.networks import Generator
|
169 |
-
else:
|
170 |
-
raise NameError('Cannot infer model type from pkl name!')
|
171 |
-
|
172 |
-
print(data[key].init_args)
|
173 |
-
print(data[key].init_kwargs)
|
174 |
-
if 'stylegan_human' in pkl:
|
175 |
-
net = Generator(
|
176 |
-
*data[key].init_args, **data[key].init_kwargs, square=False, padding=True)
|
177 |
-
else:
|
178 |
-
net = Generator(*data[key].init_args,
|
179 |
-
**data[key].init_kwargs)
|
180 |
-
net.load_state_dict(data[key].state_dict())
|
181 |
-
net.to(self._device)
|
182 |
-
except:
|
183 |
-
net = CapturedException()
|
184 |
-
self._networks[cache_key] = net
|
185 |
-
self._ignore_timing()
|
186 |
-
if isinstance(net, CapturedException):
|
187 |
-
raise net
|
188 |
-
return net
|
189 |
-
|
190 |
-
def _get_pinned_buf(self, ref):
|
191 |
-
key = (tuple(ref.shape), ref.dtype)
|
192 |
-
buf = self._pinned_bufs.get(key, None)
|
193 |
-
if buf is None:
|
194 |
-
buf = torch.empty(ref.shape, dtype=ref.dtype).pin_memory()
|
195 |
-
self._pinned_bufs[key] = buf
|
196 |
-
return buf
|
197 |
-
|
198 |
-
def to_device(self, buf):
|
199 |
-
return self._get_pinned_buf(buf).copy_(buf).to(self._device)
|
200 |
-
|
201 |
-
def to_cpu(self, buf):
|
202 |
-
return self._get_pinned_buf(buf).copy_(buf).clone()
|
203 |
-
|
204 |
-
def _ignore_timing(self):
|
205 |
-
self._is_timing = False
|
206 |
-
|
207 |
-
def _apply_cmap(self, x, name='viridis'):
|
208 |
-
cmap = self._cmaps.get(name, None)
|
209 |
-
if cmap is None:
|
210 |
-
cmap = matplotlib.cm.get_cmap(name)
|
211 |
-
cmap = cmap(np.linspace(0, 1, num=1024), bytes=True)[:, :3]
|
212 |
-
cmap = self.to_device(torch.from_numpy(cmap))
|
213 |
-
self._cmaps[name] = cmap
|
214 |
-
hi = cmap.shape[0] - 1
|
215 |
-
x = (x * hi + 0.5).clamp(0, hi).to(torch.int64)
|
216 |
-
x = torch.nn.functional.embedding(x, cmap)
|
217 |
-
return x
|
218 |
-
|
219 |
-
def init_network(self, res,
|
220 |
-
pkl=None,
|
221 |
-
w0_seed=0,
|
222 |
-
w_load=None,
|
223 |
-
w_plus=True,
|
224 |
-
noise_mode='const',
|
225 |
-
trunc_psi=0.7,
|
226 |
-
trunc_cutoff=None,
|
227 |
-
input_transform=None,
|
228 |
-
lr=0.001,
|
229 |
-
**kwargs
|
230 |
-
):
|
231 |
-
# Dig up network details.
|
232 |
-
self.pkl = pkl
|
233 |
-
G = self.get_network(pkl, 'G_ema')
|
234 |
-
self.G = G
|
235 |
-
res.img_resolution = G.img_resolution
|
236 |
-
res.num_ws = G.num_ws
|
237 |
-
res.has_noise = any('noise_const' in name for name,
|
238 |
-
_buf in G.synthesis.named_buffers())
|
239 |
-
res.has_input_transform = (
|
240 |
-
hasattr(G.synthesis, 'input') and hasattr(G.synthesis.input, 'transform'))
|
241 |
-
res.stop = False
|
242 |
-
self.lr = lr
|
243 |
-
# Set input transform.
|
244 |
-
if res.has_input_transform:
|
245 |
-
m = np.eye(3)
|
246 |
-
try:
|
247 |
-
if input_transform is not None:
|
248 |
-
m = np.linalg.inv(np.asarray(input_transform))
|
249 |
-
except np.linalg.LinAlgError:
|
250 |
-
res.error = CapturedException()
|
251 |
-
G.synthesis.input.transform.copy_(torch.from_numpy(m))
|
252 |
-
|
253 |
-
# Generate random latents.
|
254 |
-
self.w0_seed = w0_seed
|
255 |
-
self.w_load = w_load
|
256 |
-
|
257 |
-
if self.w_load is None:
|
258 |
-
# Generate random latents.
|
259 |
-
z = torch.from_numpy(np.random.RandomState(w0_seed).randn(
|
260 |
-
1, 512)).to(self._device, dtype=self._dtype)
|
261 |
-
|
262 |
-
# Run mapping network.
|
263 |
-
label = torch.zeros([1, G.c_dim], device=self._device)
|
264 |
-
w = G.mapping(z, label, truncation_psi=trunc_psi,
|
265 |
-
truncation_cutoff=trunc_cutoff)
|
266 |
-
else:
|
267 |
-
w = self.w_load.clone().to(self._device)
|
268 |
-
|
269 |
-
self.w0 = w.detach().clone()
|
270 |
-
self.w_plus = w_plus
|
271 |
-
if w_plus:
|
272 |
-
self.w = w.detach()
|
273 |
-
else:
|
274 |
-
self.w = w[:, 0, :].detach()
|
275 |
-
self.w.requires_grad = True
|
276 |
-
self.w_optim = torch.optim.Adam([self.w], lr=lr)
|
277 |
-
|
278 |
-
self.feat_refs = None
|
279 |
-
self.points0_pt = None
|
280 |
-
|
281 |
-
def set_latent(self, w, trunc_psi, trunc_cutoff):
|
282 |
-
# label = torch.zeros([1, self.G.c_dim], device=self._device)
|
283 |
-
# w = self.G.mapping(z, label, truncation_psi=trunc_psi, truncation_cutoff=trunc_cutoff)
|
284 |
-
self.w0 = w.detach().clone()
|
285 |
-
if self.w_plus:
|
286 |
-
self.w = w.detach()
|
287 |
-
else:
|
288 |
-
self.w = w[:, 0, :].detach()
|
289 |
-
self.w.requires_grad = True
|
290 |
-
self.w_optim = torch.optim.Adam([self.w], lr=self.lr)
|
291 |
-
|
292 |
-
self.feat_refs = None
|
293 |
-
self.points0_pt = None
|
294 |
-
|
295 |
-
def update_lr(self, lr):
|
296 |
-
|
297 |
-
del self.w_optim
|
298 |
-
self.w_optim = torch.optim.Adam([self.w], lr=lr)
|
299 |
-
print(f'Rebuild optimizer with lr: {lr}')
|
300 |
-
print(' Remain feat_refs and points0_pt')
|
301 |
-
|
302 |
-
def _render_drag_impl(self, res,
|
303 |
-
points=[],
|
304 |
-
targets=[],
|
305 |
-
mask=None,
|
306 |
-
lambda_mask=10,
|
307 |
-
reg=0,
|
308 |
-
feature_idx=5,
|
309 |
-
r1=3,
|
310 |
-
r2=12,
|
311 |
-
random_seed=0,
|
312 |
-
noise_mode='const',
|
313 |
-
trunc_psi=0.7,
|
314 |
-
force_fp32=False,
|
315 |
-
layer_name=None,
|
316 |
-
sel_channels=3,
|
317 |
-
base_channel=0,
|
318 |
-
img_scale_db=0,
|
319 |
-
img_normalize=False,
|
320 |
-
untransform=False,
|
321 |
-
is_drag=False,
|
322 |
-
reset=False,
|
323 |
-
to_pil=False,
|
324 |
-
**kwargs
|
325 |
-
):
|
326 |
-
try:
|
327 |
-
G = self.G
|
328 |
-
ws = self.w
|
329 |
-
if ws.dim() == 2:
|
330 |
-
ws = ws.unsqueeze(1).repeat(1, 6, 1)
|
331 |
-
ws = torch.cat([ws[:, :6, :], self.w0[:, 6:, :]], dim=1)
|
332 |
-
if hasattr(self, 'points'):
|
333 |
-
if len(points) != len(self.points):
|
334 |
-
reset = True
|
335 |
-
if reset:
|
336 |
-
self.feat_refs = None
|
337 |
-
self.points0_pt = None
|
338 |
-
self.points = points
|
339 |
-
|
340 |
-
# Run synthesis network.
|
341 |
-
label = torch.zeros([1, G.c_dim], device=self._device)
|
342 |
-
img, feat = G(ws, label, truncation_psi=trunc_psi,
|
343 |
-
noise_mode=noise_mode, input_is_w=True, return_feature=True)
|
344 |
-
|
345 |
-
h, w = G.img_resolution, G.img_resolution
|
346 |
-
|
347 |
-
if is_drag:
|
348 |
-
X = torch.linspace(0, h, h)
|
349 |
-
Y = torch.linspace(0, w, w)
|
350 |
-
xx, yy = torch.meshgrid(X, Y)
|
351 |
-
feat_resize = F.interpolate(
|
352 |
-
feat[feature_idx], [h, w], mode='bilinear')
|
353 |
-
if self.feat_refs is None:
|
354 |
-
self.feat0_resize = F.interpolate(
|
355 |
-
feat[feature_idx].detach(), [h, w], mode='bilinear')
|
356 |
-
self.feat_refs = []
|
357 |
-
for point in points:
|
358 |
-
py, px = round(point[0]), round(point[1])
|
359 |
-
self.feat_refs.append(self.feat0_resize[:, :, py, px])
|
360 |
-
self.points0_pt = torch.Tensor(points).unsqueeze(
|
361 |
-
0).to(self._device) # 1, N, 2
|
362 |
-
|
363 |
-
# Point tracking with feature matching
|
364 |
-
with torch.no_grad():
|
365 |
-
for j, point in enumerate(points):
|
366 |
-
r = round(r2 / 512 * h)
|
367 |
-
up = max(point[0] - r, 0)
|
368 |
-
down = min(point[0] + r + 1, h)
|
369 |
-
left = max(point[1] - r, 0)
|
370 |
-
right = min(point[1] + r + 1, w)
|
371 |
-
feat_patch = feat_resize[:, :, up:down, left:right]
|
372 |
-
L2 = torch.linalg.norm(
|
373 |
-
feat_patch - self.feat_refs[j].reshape(1, -1, 1, 1), dim=1)
|
374 |
-
_, idx = torch.min(L2.view(1, -1), -1)
|
375 |
-
width = right - left
|
376 |
-
point = [idx.item() // width + up, idx.item() %
|
377 |
-
width + left]
|
378 |
-
points[j] = point
|
379 |
-
|
380 |
-
res.points = [[point[0], point[1]] for point in points]
|
381 |
-
|
382 |
-
# Motion supervision
|
383 |
-
loss_motion = 0
|
384 |
-
res.stop = True
|
385 |
-
for j, point in enumerate(points):
|
386 |
-
direction = torch.Tensor(
|
387 |
-
[targets[j][1] - point[1], targets[j][0] - point[0]])
|
388 |
-
if torch.linalg.norm(direction) > max(2 / 512 * h, 2):
|
389 |
-
res.stop = False
|
390 |
-
if torch.linalg.norm(direction) > 1:
|
391 |
-
distance = (
|
392 |
-
(xx.to(self._device) - point[0])**2 + (yy.to(self._device) - point[1])**2)**0.5
|
393 |
-
relis, reljs = torch.where(
|
394 |
-
distance < round(r1 / 512 * h))
|
395 |
-
direction = direction / \
|
396 |
-
(torch.linalg.norm(direction) + 1e-7)
|
397 |
-
gridh = (relis-direction[1]) / (h-1) * 2 - 1
|
398 |
-
gridw = (reljs-direction[0]) / (w-1) * 2 - 1
|
399 |
-
grid = torch.stack(
|
400 |
-
[gridw, gridh], dim=-1).unsqueeze(0).unsqueeze(0)
|
401 |
-
target = F.grid_sample(
|
402 |
-
feat_resize.float(), grid, align_corners=True).squeeze(2)
|
403 |
-
loss_motion += F.l1_loss(
|
404 |
-
feat_resize[:, :, relis, reljs], target.detach())
|
405 |
-
|
406 |
-
loss = loss_motion
|
407 |
-
if mask is not None:
|
408 |
-
if mask.min() == 0 and mask.max() == 1:
|
409 |
-
mask_usq = mask.to(
|
410 |
-
self._device).unsqueeze(0).unsqueeze(0)
|
411 |
-
loss_fix = F.l1_loss(
|
412 |
-
feat_resize * mask_usq, self.feat0_resize * mask_usq)
|
413 |
-
loss += lambda_mask * loss_fix
|
414 |
-
|
415 |
-
# latent code regularization
|
416 |
-
loss += reg * F.l1_loss(ws, self.w0)
|
417 |
-
if not res.stop:
|
418 |
-
self.w_optim.zero_grad()
|
419 |
-
loss.backward()
|
420 |
-
self.w_optim.step()
|
421 |
-
|
422 |
-
# Scale and convert to uint8.
|
423 |
-
img = img[0]
|
424 |
-
if img_normalize:
|
425 |
-
img = img / img.norm(float('inf'),
|
426 |
-
dim=[1, 2], keepdim=True).clip(1e-8, 1e8)
|
427 |
-
img = img * (10 ** (img_scale_db / 20))
|
428 |
-
img = (img * 127.5 + 128).clamp(0,
|
429 |
-
255).to(torch.uint8).permute(1, 2, 0)
|
430 |
-
if to_pil:
|
431 |
-
from PIL import Image
|
432 |
-
img = img.cpu().numpy()
|
433 |
-
img = Image.fromarray(img)
|
434 |
-
res.image = img
|
435 |
-
res.w = ws.detach().cpu().numpy()
|
436 |
-
except Exception as e:
|
437 |
-
import os
|
438 |
-
print(f'Renderer error: {e}')
|
439 |
-
print("Out of memory error occurred. Restarting the app...")
|
440 |
-
os.execv(sys.executable, ['python'] + sys.argv)
|
441 |
-
|
442 |
-
# ----------------------------------------------------------------------------
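The core of the deleted renderer is `_render_drag_impl`, which performs one DragGAN optimization step per call: re-synthesize from the current latent, re-locate every handle point by nearest-neighbor feature matching inside a small search window, apply motion supervision through `grid_sample`, and take one Adam step on `w`. (Note the catch-all handler at the end restarts the whole process via `os.execv` on any exception, not only out-of-memory.) A minimal, self-contained sketch of just the tracking step, with illustrative shapes and a hypothetical `track_point` helper, might look like:

import torch

def track_point(feat, ref_vec, point, radius):
    """Re-locate `point` in `feat` by nearest-neighbor feature matching.

    feat:    [1, C, H, W] feature map from the current synthesis pass.
    ref_vec: [1, C] feature vector sampled at the point's initial location.
    point:   (row, col) integer coordinates from the previous step.
    radius:  half-size of the square search window, in pixels.
    """
    _, _, H, W = feat.shape
    up, down = max(point[0] - radius, 0), min(point[0] + radius + 1, H)
    left, right = max(point[1] - radius, 0), min(point[1] + radius + 1, W)
    patch = feat[:, :, up:down, left:right]                   # [1, C, h, w]
    dist = torch.linalg.norm(patch - ref_vec.reshape(1, -1, 1, 1), dim=1)
    idx = dist.view(-1).argmin().item()                       # best match, flattened
    width = right - left
    return (idx // width + up, idx % width + left)

# Toy check: the reference vector should be re-found at its own location.
feat = torch.randn(1, 128, 256, 256)
pt = (100, 120)
ref = feat[:, :, pt[0], pt[1]]
assert track_point(feat, ref, pt, radius=6) == pt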
spaces/Ananthap4/itineraryGenerator/app.py
DELETED
@@ -1,28 +0,0 @@
-import openai
-import gradio
-openai.api_key="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"
-
-
-messages = [{"role": "system", "content": "To explore best places around you are in the right place"}]
-
-def CustomChatGPT(Place_to_hangout,Current_location,Time,Distance):
-    messages.append({"role": "user", "content": f"places to visit in {Place_to_hangout} from {Current_location} in {Time} within {Distance}"})
-    response = openai.ChatCompletion.create(
-        model = "gpt-3.5-turbo",
-        messages = messages
-    )
-    ChatGPT_reply = response["choices"][0]["message"]["content"]
-    messages.append({"role": "assistant", "content": ChatGPT_reply})
-    return ChatGPT_reply
-
-demo = gradio.Interface(fn=CustomChatGPT, inputs =["text","text","text","text"], outputs = "text", title = "Itinerary generator")
-
-demo.launch()
-
-
-
-
-
-
-
-
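Worth flagging in the deleted app: the OpenAI key is hardcoded in source, so it shipped with every clone of the Space, and the module-level `messages` list grows without bound across requests. The conventional fix for the first issue is to read the key from an environment variable (on Spaces, a repository secret); a minimal sketch:

import os

import openai

# Fail fast if the secret is missing rather than embedding a key in source.
openai.api_key = os.environ["OPENAI_API_KEY"]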
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/diffusion_pipeline.md
DELETED
@@ -1,36 +0,0 @@
-<!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
--->
-
-# Pipelines
-
-The [`DiffusionPipeline`] is the quickest way to load any pretrained diffusion pipeline from the [Hub](https://huggingface.co/models?library=diffusers) for inference.
-
-<Tip>
-
-You shouldn't use the [`DiffusionPipeline`] class for training or finetuning a diffusion model. Individual
-components (for example, [`UNet2DModel`] and [`UNet2DConditionModel`]) of diffusion pipelines are usually trained individually, so we suggest directly working with them instead.
-
-</Tip>
-
-The pipeline type (for example [`StableDiffusionPipeline`]) of any diffusion pipeline loaded with [`~DiffusionPipeline.from_pretrained`] is automatically
-detected and pipeline components are loaded and passed to the `__init__` function of the pipeline.
-
-Any pipeline object can be saved locally with [`~DiffusionPipeline.save_pretrained`].
-
-## DiffusionPipeline
-
-[[autodoc]] DiffusionPipeline
-  - all
-  - __call__
-  - device
-  - to
-  - components
spaces/Andy1621/uniformer_image_detection/configs/_base_/schedules/schedule_2x.py
DELETED
@@ -1,11 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
-    policy='step',
-    warmup='linear',
-    warmup_iters=500,
-    warmup_ratio=0.001,
-    step=[16, 22])
-runner = dict(type='EpochBasedRunner', max_epochs=24)
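This `2x` config encodes MMDetection's standard recipe: 500 iterations of linear warmup starting from `warmup_ratio * lr`, then step decay at epochs 16 and 22 of a 24-epoch run. A small sketch of the resulting learning rate (the 0.1 decay factor is MMCV's default `gamma`, assumed here since the config does not override it):

def lr_at(epoch: int, iteration: int, base_lr: float = 0.02) -> float:
    """Learning rate under the schedule_2x config (gamma assumed 0.1)."""
    if iteration < 500:  # linear warmup from warmup_ratio * base_lr
        start = 0.001 * base_lr
        return start + (base_lr - start) * iteration / 500
    decays = sum(epoch >= step for step in [16, 22])
    return base_lr * (0.1 ** decays)

print(lr_at(0, 0))      # 2e-05  (warmup start)
print(lr_at(5, 1000))   # 0.02   (full base LR)
print(lr_at(16, 1000))  # 0.002  (after first decay)
print(lr_at(22, 1000))  # 0.0002 (after second decay)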
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/__init__.py
DELETED
@@ -1,69 +0,0 @@
-# flake8: noqa
-# Copyright (c) OpenMMLab. All rights reserved.
-from .config import Config, ConfigDict, DictAction
-from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
-                   has_method, import_modules_from_strings, is_list_of,
-                   is_method_overridden, is_seq_of, is_str, is_tuple_of,
-                   iter_cast, list_cast, requires_executable, requires_package,
-                   slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
-                   to_ntuple, tuple_cast)
-from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
-                   scandir, symlink)
-from .progressbar import (ProgressBar, track_iter_progress,
-                          track_parallel_progress, track_progress)
-from .testing import (assert_attrs_equal, assert_dict_contains_subset,
-                      assert_dict_has_keys, assert_is_norm_layer,
-                      assert_keys_equal, assert_params_all_zeros,
-                      check_python_script)
-from .timer import Timer, TimerError, check_time
-from .version_utils import digit_version, get_git_hash
-
-try:
-    import torch
-except ImportError:
-    __all__ = [
-        'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
-        'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
-        'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
-        'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
-        'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
-        'track_progress', 'track_iter_progress', 'track_parallel_progress',
-        'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
-        'digit_version', 'get_git_hash', 'import_modules_from_strings',
-        'assert_dict_contains_subset', 'assert_attrs_equal',
-        'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
-        'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
-        'is_method_overridden', 'has_method'
-    ]
-else:
-    from .env import collect_env
-    from .logging import get_logger, print_log
-    from .parrots_jit import jit, skip_no_elena
-    from .parrots_wrapper import (
-        TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
-        PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
-        _AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
-        _MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
-    from .registry import Registry, build_from_cfg
-    from .trace import is_jit_tracing
-    __all__ = [
-        'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
-        'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
-        'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
-        'check_prerequisites', 'requires_package', 'requires_executable',
-        'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
-        'symlink', 'scandir', 'ProgressBar', 'track_progress',
-        'track_iter_progress', 'track_parallel_progress', 'Registry',
-        'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
-        '_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
-        '_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
-        'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
-        'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
-        'deprecated_api_warning', 'digit_version', 'get_git_hash',
-        'import_modules_from_strings', 'jit', 'skip_no_elena',
-        'assert_dict_contains_subset', 'assert_attrs_equal',
-        'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
-        'assert_params_all_zeros', 'check_python_script',
-        'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
-        '_get_cuda_home', 'has_method'
-    ]
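The deleted `__init__` keeps mmcv importable without PyTorch by splitting its public surface across a try/except-import: the CPU-only names land in the `except ImportError` branch and the torch-dependent ones in `else`. The same pattern, reduced to a runnable skeleton:

try:
    import torch  # the heavy optional dependency
except ImportError:
    HAS_TORCH = False
    __all__ = ['HAS_TORCH']
else:
    HAS_TORCH = True

    def to_tensor(x):
        """Exported only when torch imported successfully."""
        return torch.as_tensor(x)

    __all__ = ['HAS_TORCH', 'to_tensor']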
spaces/Anonymous-sub/Rerender/ControlNet/tutorial_train_sd21.py
DELETED
@@ -1,35 +0,0 @@
-from share import *
-
-import pytorch_lightning as pl
-from torch.utils.data import DataLoader
-from tutorial_dataset import MyDataset
-from cldm.logger import ImageLogger
-from cldm.model import create_model, load_state_dict
-
-
-# Configs
-resume_path = './models/control_sd21_ini.ckpt'
-batch_size = 4
-logger_freq = 300
-learning_rate = 1e-5
-sd_locked = True
-only_mid_control = False
-
-
-# First use cpu to load models. Pytorch Lightning will automatically move it to GPUs.
-model = create_model('./models/cldm_v21.yaml').cpu()
-model.load_state_dict(load_state_dict(resume_path, location='cpu'))
-model.learning_rate = learning_rate
-model.sd_locked = sd_locked
-model.only_mid_control = only_mid_control
-
-
-# Misc
-dataset = MyDataset()
-dataloader = DataLoader(dataset, num_workers=0, batch_size=batch_size, shuffle=True)
-logger = ImageLogger(batch_frequency=logger_freq)
-trainer = pl.Trainer(gpus=1, precision=32, callbacks=[logger])
-
-
-# Train!
-trainer.fit(model, dataloader)
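One portability note on the deleted script: `pl.Trainer(gpus=1, ...)` is the pre-2.0 Lightning signature. On Lightning >= 2.0 the equivalent single-GPU, fp32 construction would be (a sketch, not part of the original file):

import pytorch_lightning as pl

trainer = pl.Trainer(accelerator="gpu", devices=1, precision=32)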
spaces/Apex-X/ROOPOK/roop/capturer.py
DELETED
@@ -1,22 +0,0 @@
-from typing import Optional
-import cv2
-
-from roop.typing import Frame
-
-
-def get_video_frame(video_path: str, frame_number: int = 0) -> Optional[Frame]:
-    capture = cv2.VideoCapture(video_path)
-    frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
-    capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
-    has_frame, frame = capture.read()
-    capture.release()
-    if has_frame:
-        return frame
-    return None
-
-
-def get_video_frame_total(video_path: str) -> int:
-    capture = cv2.VideoCapture(video_path)
-    video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
-    capture.release()
-    return video_frame_total
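Two easy-to-miss details in this helper: `CAP_PROP_POS_FRAMES` is 0-indexed, so the `frame_number - 1` offset implies callers pass 1-based frame numbers, and clamping to `frame_total` rather than `frame_total - 1` can still seek one position past the last frame, in which case `capture.read()` fails and `None` comes back. A usage sketch, with `clip.mp4` as a placeholder path:

import cv2

frame = get_video_frame('clip.mp4', frame_number=10)  # 1-based request
if frame is not None:
    cv2.imwrite('frame_10.png', frame)  # frames come back as BGR ndarrays
print(get_video_frame_total('clip.mp4'))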
spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/__init__.py
DELETED
File without changes
spaces/Arnx/MusicGenXvAKN/audiocraft/__init__.py
DELETED
@@ -1,10 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-# flake8: noqa
-from . import data, modules, models
-
-__version__ = '0.0.2a2'
spaces/Arnx/MusicGenXvAKN/audiocraft/modules/rope.py
DELETED
@@ -1,124 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import typing as tp
-
-from torch import nn
-import torch
-
-
-class XPos(nn.Module):
-    """Length-extrapolatable positional embedding (xPos) from [Sun et al 2022](https://arxiv.org/abs/2212.10554v1).
-    This applies an exponential decay to the RoPE rotation matrix.
-
-    Args:
-        dim (int): Embedding dimension.
-        smoothing (float): Smoothing factor applied to the decay rates.
-        base_scale (int): Base decay rate, given in terms of scaling time.
-        device (torch.device or None): Device on which to initialize the module.
-        dtype (torch.dtype): dtype to use to generate the embedding.
-    """
-    def __init__(self, dim: int, smoothing: float = 0.4, base_scale: int = 512,
-                 device=None, dtype: torch.dtype = torch.float32):
-        super().__init__()
-        assert dim % 2 == 0
-        assert dtype in [torch.float64, torch.float32]
-        self.dtype = dtype
-        self.base_scale = base_scale
-
-        half_dim = dim // 2
-        adim = torch.arange(half_dim, device=device, dtype=dtype)
-        decay_rates = (adim / half_dim + smoothing) / (1.0 + smoothing)
-        self.register_buffer("decay_rates", decay_rates)
-        self.decay: tp.Optional[torch.Tensor] = None
-
-    def get_decay(self, start: int, end: int):
-        """Create complex decay tensor, cache values for fast computation.
-        """
-        if self.decay is None or end > self.decay.shape[0]:
-            assert isinstance(self.decay_rates, torch.Tensor)  # Satisfy type checker.
-            idx = torch.arange(end, device=self.decay_rates.device, dtype=self.dtype)
-            power = idx / self.base_scale
-            scale = self.decay_rates ** power.unsqueeze(-1)
-            self.decay = torch.polar(scale, torch.zeros_like(scale))
-        return self.decay[start:end]  # [T, C/2]
-
-
-class RotaryEmbedding(nn.Module):
-    """Rotary positional embedding (RoPE) from [Su et al 2022](https://arxiv.org/abs/2104.09864).
-
-    Args:
-        dim (int): Embedding dimension (twice the number of frequencies).
-        max_period (float): Maximum period of the rotation frequencies.
-        xpos (bool): Use xPos, applies an exponential decay to rotation matrix.
-        scale (float): Scale of positional embedding, set to 0 to deactivate.
-        device (torch.device or None): Device on which to initialize the module.
-        dtype (torch.dtype): dtype to use to generate the embedding.
-    """
-    def __init__(self, dim: int, max_period: float = 10000.0, xpos: bool = False,
-                 scale: float = 1.0, device=None, dtype: torch.dtype = torch.float32):
-        super().__init__()
-        assert dim % 2 == 0
-        self.scale = scale
-        assert dtype in [torch.float64, torch.float32]
-        self.dtype = dtype
-
-        adim = torch.arange(0, dim, 2, device=device, dtype=dtype)[: (dim // 2)]
-        frequencies = 1.0 / (max_period ** (adim / dim))
-        self.register_buffer("frequencies", frequencies)
-        self.rotation: tp.Optional[torch.Tensor] = None
-
-        self.xpos = XPos(dim, device=device, dtype=dtype) if xpos else None
-
-    def get_rotation(self, start: int, end: int):
-        """Create complex rotation tensor, cache values for fast computation.
-        """
-        if self.rotation is None or end > self.rotation.shape[0]:
-            assert isinstance(self.frequencies, torch.Tensor)  # Satisfy type checker.
-            idx = torch.arange(end, device=self.frequencies.device, dtype=self.dtype)
-            angles = torch.outer(idx, self.frequencies)
-            self.rotation = torch.polar(torch.ones_like(angles), angles)
-        return self.rotation[start:end]
-
-    def rotate(self, x: torch.Tensor, start: int = 0, invert_decay: bool = False):
-        """Apply rope rotation to query or key tensor.
-        """
-        T = x.shape[1]
-        rotation = self.get_rotation(start, start + T).unsqueeze(0).unsqueeze(2)
-
-        if self.xpos:
-            decay = self.xpos.get_decay(start, start + T).unsqueeze(0).unsqueeze(2)
-        else:
-            decay = 1.0
-
-        if invert_decay:
-            decay = decay ** -1
-
-        x_complex = torch.view_as_complex(x.to(self.dtype).reshape(*x.shape[:-1], -1, 2))
-        scaled_rotation = (rotation * decay) * self.scale + (1.0 - self.scale)
-        x_out = torch.view_as_real(x_complex * scaled_rotation).flatten(-2)
-
-        return x_out.type_as(x)
-
-    def rotate_qk(self, query: torch.Tensor, key: torch.Tensor, start: int = 0):
-        """ Apply rope rotation to both query and key tensors.
-        Supports streaming mode, in which query and key are not expected to have the same shape.
-        In streaming mode, key will be of length [P + C] with P the cached past timesteps, but
-        query will be [C] (typically C == 1).
-
-        Args:
-            query (torch.Tensor): Query to rotate.
-            key (torch.Tensor): Key to rotate.
-            start (int): Start index of the sequence for time offset.
-        """
-        query_timesteps = query.shape[1]
-        key_timesteps = key.shape[1]
-        streaming_offset = key_timesteps - query_timesteps
-
-        query_out = self.rotate(query, start + streaming_offset)
-        key_out = self.rotate(key, start, invert_decay=True)
-
-        return query_out, key_out
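The module expects `[batch, time, heads, dim]` tensors and rotates adjacent channel pairs via a complex multiply; `rotate_qk` offsets the query by `key_T - query_T` so cached keys and fresh queries line up during streaming decode. A quick smoke test with illustrative shapes:

import torch

rope = RotaryEmbedding(dim=64)   # per-head dim, must be even
q = torch.randn(2, 8, 4, 64)     # [batch, time, heads, dim]
k = torch.randn(2, 12, 4, 64)    # streaming: key holds 4 cached past steps

q_rot, k_rot = rope.rotate_qk(q, k)
assert q_rot.shape == q.shape and k_rot.shape == k.shape

# With scale=1.0 and no xPos this is a pure rotation, so norms survive.
assert torch.allclose(q.norm(), q_rot.norm(), atol=1e-4)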
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/easy_install.py
DELETED
@@ -1,2312 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Easy Install
|
3 |
-
------------
|
4 |
-
|
5 |
-
A tool for doing automatic download/extract/build of distutils-based Python
|
6 |
-
packages. For detailed documentation, see the accompanying EasyInstall.txt
|
7 |
-
file, or visit the `EasyInstall home page`__.
|
8 |
-
|
9 |
-
__ https://setuptools.pypa.io/en/latest/deprecated/easy_install.html
|
10 |
-
|
11 |
-
"""
|
12 |
-
|
13 |
-
from glob import glob
|
14 |
-
from distutils.util import get_platform
|
15 |
-
from distutils.util import convert_path, subst_vars
|
16 |
-
from distutils.errors import (
|
17 |
-
DistutilsArgError, DistutilsOptionError,
|
18 |
-
DistutilsError, DistutilsPlatformError,
|
19 |
-
)
|
20 |
-
from distutils import log, dir_util
|
21 |
-
from distutils.command.build_scripts import first_line_re
|
22 |
-
from distutils.spawn import find_executable
|
23 |
-
from distutils.command import install
|
24 |
-
import sys
|
25 |
-
import os
|
26 |
-
import zipimport
|
27 |
-
import shutil
|
28 |
-
import tempfile
|
29 |
-
import zipfile
|
30 |
-
import re
|
31 |
-
import stat
|
32 |
-
import random
|
33 |
-
import textwrap
|
34 |
-
import warnings
|
35 |
-
import site
|
36 |
-
import struct
|
37 |
-
import contextlib
|
38 |
-
import subprocess
|
39 |
-
import shlex
|
40 |
-
import io
|
41 |
-
import configparser
|
42 |
-
import sysconfig
|
43 |
-
|
44 |
-
|
45 |
-
from sysconfig import get_path
|
46 |
-
|
47 |
-
from setuptools import SetuptoolsDeprecationWarning
|
48 |
-
|
49 |
-
from setuptools import Command
|
50 |
-
from setuptools.sandbox import run_setup
|
51 |
-
from setuptools.command import setopt
|
52 |
-
from setuptools.archive_util import unpack_archive
|
53 |
-
from setuptools.package_index import (
|
54 |
-
PackageIndex, parse_requirement_arg, URL_SCHEME,
|
55 |
-
)
|
56 |
-
from setuptools.command import bdist_egg, egg_info
|
57 |
-
from setuptools.wheel import Wheel
|
58 |
-
from pkg_resources import (
|
59 |
-
normalize_path, resource_string,
|
60 |
-
get_distribution, find_distributions, Environment, Requirement,
|
61 |
-
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
|
62 |
-
VersionConflict, DEVELOP_DIST,
|
63 |
-
)
|
64 |
-
import pkg_resources
|
65 |
-
from .._path import ensure_directory
|
66 |
-
from ..extern.jaraco.text import yield_lines
|
67 |
-
|
68 |
-
|
69 |
-
# Turn on PEP440Warnings
|
70 |
-
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
|
71 |
-
|
72 |
-
__all__ = [
|
73 |
-
'easy_install', 'PthDistributions', 'extract_wininst_cfg',
|
74 |
-
'get_exe_prefixes',
|
75 |
-
]
|
76 |
-
|
77 |
-
|
78 |
-
def is_64bit():
|
79 |
-
return struct.calcsize("P") == 8
|
80 |
-
|
81 |
-
|
82 |
-
def _to_bytes(s):
|
83 |
-
return s.encode('utf8')
|
84 |
-
|
85 |
-
|
86 |
-
def isascii(s):
|
87 |
-
try:
|
88 |
-
s.encode('ascii')
|
89 |
-
return True
|
90 |
-
except UnicodeError:
|
91 |
-
return False
|
92 |
-
|
93 |
-
|
94 |
-
def _one_liner(text):
|
95 |
-
return textwrap.dedent(text).strip().replace('\n', '; ')
|
96 |
-
|
97 |
-
|
98 |
-
class easy_install(Command):
|
99 |
-
"""Manage a download/build/install process"""
|
100 |
-
description = "Find/get/install Python packages"
|
101 |
-
command_consumes_arguments = True
|
102 |
-
|
103 |
-
user_options = [
|
104 |
-
('prefix=', None, "installation prefix"),
|
105 |
-
("zip-ok", "z", "install package as a zipfile"),
|
106 |
-
("multi-version", "m", "make apps have to require() a version"),
|
107 |
-
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
|
108 |
-
("install-dir=", "d", "install package to DIR"),
|
109 |
-
("script-dir=", "s", "install scripts to DIR"),
|
110 |
-
("exclude-scripts", "x", "Don't install scripts"),
|
111 |
-
("always-copy", "a", "Copy all needed packages to install dir"),
|
112 |
-
("index-url=", "i", "base URL of Python Package Index"),
|
113 |
-
("find-links=", "f", "additional URL(s) to search for packages"),
|
114 |
-
("build-directory=", "b",
|
115 |
-
"download/extract/build in DIR; keep the results"),
|
116 |
-
('optimize=', 'O',
|
117 |
-
"also compile with optimization: -O1 for \"python -O\", "
|
118 |
-
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
|
119 |
-
('record=', None,
|
120 |
-
"filename in which to record list of installed files"),
|
121 |
-
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
|
122 |
-
('site-dirs=', 'S', "list of directories where .pth files work"),
|
123 |
-
('editable', 'e', "Install specified packages in editable form"),
|
124 |
-
('no-deps', 'N', "don't install dependencies"),
|
125 |
-
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
|
126 |
-
('local-snapshots-ok', 'l',
|
127 |
-
"allow building eggs from local checkouts"),
|
128 |
-
('version', None, "print version information and exit"),
|
129 |
-
('no-find-links', None,
|
130 |
-
"Don't load find-links defined in packages being installed"),
|
131 |
-
('user', None, "install in user site-package '%s'" % site.USER_SITE)
|
132 |
-
]
|
133 |
-
boolean_options = [
|
134 |
-
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
|
135 |
-
'editable',
|
136 |
-
'no-deps', 'local-snapshots-ok', 'version',
|
137 |
-
'user'
|
138 |
-
]
|
139 |
-
|
140 |
-
negative_opt = {'always-unzip': 'zip-ok'}
|
141 |
-
create_index = PackageIndex
|
142 |
-
|
143 |
-
def initialize_options(self):
|
144 |
-
warnings.warn(
|
145 |
-
"easy_install command is deprecated. "
|
146 |
-
"Use build and pip and other standards-based tools.",
|
147 |
-
EasyInstallDeprecationWarning,
|
148 |
-
)
|
149 |
-
|
150 |
-
# the --user option seems to be an opt-in one,
|
151 |
-
# so the default should be False.
|
152 |
-
self.user = 0
|
153 |
-
self.zip_ok = self.local_snapshots_ok = None
|
154 |
-
self.install_dir = self.script_dir = self.exclude_scripts = None
|
155 |
-
self.index_url = None
|
156 |
-
self.find_links = None
|
157 |
-
self.build_directory = None
|
158 |
-
self.args = None
|
159 |
-
self.optimize = self.record = None
|
160 |
-
self.upgrade = self.always_copy = self.multi_version = None
|
161 |
-
self.editable = self.no_deps = self.allow_hosts = None
|
162 |
-
self.root = self.prefix = self.no_report = None
|
163 |
-
self.version = None
|
164 |
-
self.install_purelib = None # for pure module distributions
|
165 |
-
self.install_platlib = None # non-pure (dists w/ extensions)
|
166 |
-
self.install_headers = None # for C/C++ headers
|
167 |
-
self.install_lib = None # set to either purelib or platlib
|
168 |
-
self.install_scripts = None
|
169 |
-
self.install_data = None
|
170 |
-
self.install_base = None
|
171 |
-
self.install_platbase = None
|
172 |
-
self.install_userbase = site.USER_BASE
|
173 |
-
self.install_usersite = site.USER_SITE
|
174 |
-
self.no_find_links = None
|
175 |
-
|
176 |
-
# Options not specifiable via command line
|
177 |
-
self.package_index = None
|
178 |
-
self.pth_file = self.always_copy_from = None
|
179 |
-
self.site_dirs = None
|
180 |
-
self.installed_projects = {}
|
181 |
-
# Always read easy_install options, even if we are subclassed, or have
|
182 |
-
# an independent instance created. This ensures that defaults will
|
183 |
-
# always come from the standard configuration file(s)' "easy_install"
|
184 |
-
# section, even if this is a "develop" or "install" command, or some
|
185 |
-
# other embedding.
|
186 |
-
self._dry_run = None
|
187 |
-
self.verbose = self.distribution.verbose
|
188 |
-
self.distribution._set_command_options(
|
189 |
-
self, self.distribution.get_option_dict('easy_install')
|
190 |
-
)
|
191 |
-
|
192 |
-
def delete_blockers(self, blockers):
|
193 |
-
extant_blockers = (
|
194 |
-
filename for filename in blockers
|
195 |
-
if os.path.exists(filename) or os.path.islink(filename)
|
196 |
-
)
|
197 |
-
list(map(self._delete_path, extant_blockers))
|
198 |
-
|
199 |
-
def _delete_path(self, path):
|
200 |
-
log.info("Deleting %s", path)
|
201 |
-
if self.dry_run:
|
202 |
-
return
|
203 |
-
|
204 |
-
is_tree = os.path.isdir(path) and not os.path.islink(path)
|
205 |
-
remover = rmtree if is_tree else os.unlink
|
206 |
-
remover(path)
|
207 |
-
|
208 |
-
@staticmethod
|
209 |
-
def _render_version():
|
210 |
-
"""
|
211 |
-
Render the Setuptools version and installation details, then exit.
|
212 |
-
"""
|
213 |
-
ver = '{}.{}'.format(*sys.version_info)
|
214 |
-
dist = get_distribution('setuptools')
|
215 |
-
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
|
216 |
-
print(tmpl.format(**locals()))
|
217 |
-
raise SystemExit()
|
218 |
-
|
219 |
-
def finalize_options(self): # noqa: C901 # is too complex (25) # FIXME
|
220 |
-
self.version and self._render_version()
|
221 |
-
|
222 |
-
py_version = sys.version.split()[0]
|
223 |
-
|
224 |
-
self.config_vars = dict(sysconfig.get_config_vars())
|
225 |
-
|
226 |
-
self.config_vars.update({
|
227 |
-
'dist_name': self.distribution.get_name(),
|
228 |
-
'dist_version': self.distribution.get_version(),
|
229 |
-
'dist_fullname': self.distribution.get_fullname(),
|
230 |
-
'py_version': py_version,
|
231 |
-
'py_version_short': f'{sys.version_info.major}.{sys.version_info.minor}',
|
232 |
-
'py_version_nodot': f'{sys.version_info.major}{sys.version_info.minor}',
|
233 |
-
'sys_prefix': self.config_vars['prefix'],
|
234 |
-
'sys_exec_prefix': self.config_vars['exec_prefix'],
|
235 |
-
# Only python 3.2+ has abiflags
|
236 |
-
'abiflags': getattr(sys, 'abiflags', ''),
|
237 |
-
'platlibdir': getattr(sys, 'platlibdir', 'lib'),
|
238 |
-
})
|
239 |
-
with contextlib.suppress(AttributeError):
|
240 |
-
# only for distutils outside stdlib
|
241 |
-
self.config_vars.update({
|
242 |
-
'implementation_lower': install._get_implementation().lower(),
|
243 |
-
'implementation': install._get_implementation(),
|
244 |
-
})
|
245 |
-
|
246 |
-
# pypa/distutils#113 Python 3.9 compat
|
247 |
-
self.config_vars.setdefault(
|
248 |
-
'py_version_nodot_plat',
|
249 |
-
getattr(sys, 'windir', '').replace('.', ''),
|
250 |
-
)
|
251 |
-
|
252 |
-
self.config_vars['userbase'] = self.install_userbase
|
253 |
-
self.config_vars['usersite'] = self.install_usersite
|
254 |
-
if self.user and not site.ENABLE_USER_SITE:
|
255 |
-
log.warn("WARNING: The user site-packages directory is disabled.")
|
256 |
-
|
257 |
-
self._fix_install_dir_for_user_site()
|
258 |
-
|
259 |
-
self.expand_basedirs()
|
260 |
-
self.expand_dirs()
|
261 |
-
|
262 |
-
self._expand(
|
263 |
-
'install_dir', 'script_dir', 'build_directory',
|
264 |
-
'site_dirs',
|
265 |
-
)
|
266 |
-
# If a non-default installation directory was specified, default the
|
267 |
-
# script directory to match it.
|
268 |
-
if self.script_dir is None:
|
269 |
-
self.script_dir = self.install_dir
|
270 |
-
|
271 |
-
if self.no_find_links is None:
|
272 |
-
self.no_find_links = False
|
273 |
-
|
274 |
-
# Let install_dir get set by install_lib command, which in turn
|
275 |
-
# gets its info from the install command, and takes into account
|
276 |
-
# --prefix and --home and all that other crud.
|
277 |
-
self.set_undefined_options(
|
278 |
-
'install_lib', ('install_dir', 'install_dir')
|
279 |
-
)
|
280 |
-
# Likewise, set default script_dir from 'install_scripts.install_dir'
|
281 |
-
self.set_undefined_options(
|
282 |
-
'install_scripts', ('install_dir', 'script_dir')
|
283 |
-
)
|
284 |
-
|
285 |
-
if self.user and self.install_purelib:
|
286 |
-
self.install_dir = self.install_purelib
|
287 |
-
self.script_dir = self.install_scripts
|
288 |
-
# default --record from the install command
|
289 |
-
self.set_undefined_options('install', ('record', 'record'))
|
290 |
-
self.all_site_dirs = get_site_dirs()
|
291 |
-
self.all_site_dirs.extend(self._process_site_dirs(self.site_dirs))
|
292 |
-
|
293 |
-
if not self.editable:
|
294 |
-
self.check_site_dir()
|
295 |
-
default_index = os.getenv("__EASYINSTALL_INDEX", "https://pypi.org/simple/")
|
296 |
-
# ^ Private API for testing purposes only
|
297 |
-
self.index_url = self.index_url or default_index
|
298 |
-
self.shadow_path = self.all_site_dirs[:]
|
299 |
-
for path_item in self.install_dir, normalize_path(self.script_dir):
|
300 |
-
if path_item not in self.shadow_path:
|
301 |
-
self.shadow_path.insert(0, path_item)
|
302 |
-
|
303 |
-
if self.allow_hosts is not None:
|
304 |
-
hosts = [s.strip() for s in self.allow_hosts.split(',')]
|
305 |
-
else:
|
306 |
-
hosts = ['*']
|
307 |
-
if self.package_index is None:
|
308 |
-
self.package_index = self.create_index(
|
309 |
-
self.index_url, search_path=self.shadow_path, hosts=hosts,
|
310 |
-
)
|
311 |
-
self.local_index = Environment(self.shadow_path + sys.path)
|
312 |
-
|
313 |
-
if self.find_links is not None:
|
314 |
-
if isinstance(self.find_links, str):
|
315 |
-
self.find_links = self.find_links.split()
|
316 |
-
else:
|
317 |
-
self.find_links = []
|
318 |
-
if self.local_snapshots_ok:
|
319 |
-
self.package_index.scan_egg_links(self.shadow_path + sys.path)
|
320 |
-
if not self.no_find_links:
|
321 |
-
self.package_index.add_find_links(self.find_links)
|
322 |
-
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
|
323 |
-
self.optimize = self._validate_optimize(self.optimize)
|
324 |
-
|
325 |
-
if self.editable and not self.build_directory:
|
326 |
-
raise DistutilsArgError(
|
327 |
-
"Must specify a build directory (-b) when using --editable"
|
328 |
-
)
|
329 |
-
if not self.args:
|
330 |
-
raise DistutilsArgError(
|
331 |
-
"No urls, filenames, or requirements specified (see --help)")
|
332 |
-
|
333 |
-
self.outputs = []
|
334 |
-
|
335 |
-
@staticmethod
|
336 |
-
def _process_site_dirs(site_dirs):
|
337 |
-
if site_dirs is None:
|
338 |
-
return
|
339 |
-
|
340 |
-
normpath = map(normalize_path, sys.path)
|
341 |
-
site_dirs = [
|
342 |
-
os.path.expanduser(s.strip()) for s in
|
343 |
-
site_dirs.split(',')
|
344 |
-
]
|
345 |
-
for d in site_dirs:
|
346 |
-
if not os.path.isdir(d):
|
347 |
-
log.warn("%s (in --site-dirs) does not exist", d)
|
348 |
-
elif normalize_path(d) not in normpath:
|
349 |
-
raise DistutilsOptionError(
|
350 |
-
d + " (in --site-dirs) is not on sys.path"
|
351 |
-
)
|
352 |
-
else:
|
353 |
-
yield normalize_path(d)
|
354 |
-
|
355 |
-
@staticmethod
|
356 |
-
def _validate_optimize(value):
|
357 |
-
try:
|
358 |
-
value = int(value)
|
359 |
-
if value not in range(3):
|
360 |
-
raise ValueError
|
361 |
-
except ValueError as e:
|
362 |
-
raise DistutilsOptionError(
|
363 |
-
"--optimize must be 0, 1, or 2"
|
364 |
-
) from e
|
365 |
-
|
366 |
-
return value
|
367 |
-
|
368 |
-
def _fix_install_dir_for_user_site(self):
|
369 |
-
"""
|
370 |
-
Fix the install_dir if "--user" was used.
|
371 |
-
"""
|
372 |
-
if not self.user:
|
373 |
-
return
|
374 |
-
|
375 |
-
self.create_home_path()
|
376 |
-
if self.install_userbase is None:
|
377 |
-
msg = "User base directory is not specified"
|
378 |
-
raise DistutilsPlatformError(msg)
|
379 |
-
self.install_base = self.install_platbase = self.install_userbase
|
380 |
-
scheme_name = f'{os.name}_user'
|
381 |
-
self.select_scheme(scheme_name)
|
382 |
-
|
383 |
-
def _expand_attrs(self, attrs):
|
384 |
-
for attr in attrs:
|
385 |
-
val = getattr(self, attr)
|
386 |
-
if val is not None:
|
387 |
-
if os.name == 'posix' or os.name == 'nt':
|
388 |
-
val = os.path.expanduser(val)
|
389 |
-
val = subst_vars(val, self.config_vars)
|
390 |
-
setattr(self, attr, val)
|
391 |
-
|
392 |
-
def expand_basedirs(self):
|
393 |
-
"""Calls `os.path.expanduser` on install_base, install_platbase and
|
394 |
-
root."""
|
395 |
-
self._expand_attrs(['install_base', 'install_platbase', 'root'])
|
396 |
-
|
397 |
-
def expand_dirs(self):
|
398 |
-
"""Calls `os.path.expanduser` on install dirs."""
|
399 |
-
dirs = [
|
400 |
-
'install_purelib',
|
401 |
-
'install_platlib',
|
402 |
-
'install_lib',
|
403 |
-
'install_headers',
|
404 |
-
'install_scripts',
|
405 |
-
'install_data',
|
406 |
-
]
|
407 |
-
self._expand_attrs(dirs)
|
408 |
-
|
409 |
-
def run(self, show_deprecation=True):
|
410 |
-
if show_deprecation:
|
411 |
-
self.announce(
|
412 |
-
"WARNING: The easy_install command is deprecated "
|
413 |
-
"and will be removed in a future version.",
|
414 |
-
log.WARN,
|
415 |
-
)
|
416 |
-
if self.verbose != self.distribution.verbose:
|
417 |
-
log.set_verbosity(self.verbose)
|
418 |
-
try:
|
419 |
-
for spec in self.args:
|
420 |
-
self.easy_install(spec, not self.no_deps)
|
421 |
-
if self.record:
|
422 |
-
outputs = self.outputs
|
423 |
-
if self.root: # strip any package prefix
|
424 |
-
root_len = len(self.root)
|
425 |
-
for counter in range(len(outputs)):
|
426 |
-
outputs[counter] = outputs[counter][root_len:]
|
427 |
-
from distutils import file_util
|
428 |
-
|
429 |
-
self.execute(
|
430 |
-
file_util.write_file, (self.record, outputs),
|
431 |
-
"writing list of installed files to '%s'" %
|
432 |
-
self.record
|
433 |
-
)
|
434 |
-
self.warn_deprecated_options()
|
435 |
-
finally:
|
436 |
-
log.set_verbosity(self.distribution.verbose)
|
437 |
-
|
438 |
-
def pseudo_tempname(self):
|
439 |
-
"""Return a pseudo-tempname base in the install directory.
|
440 |
-
This code is intentionally naive; if a malicious party can write to
|
441 |
-
the target directory you're already in deep doodoo.
|
442 |
-
"""
|
443 |
-
try:
|
444 |
-
pid = os.getpid()
|
445 |
-
except Exception:
|
446 |
-
pid = random.randint(0, sys.maxsize)
|
447 |
-
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
|
448 |
-
|
449 |
-
def warn_deprecated_options(self):
|
450 |
-
pass
|
451 |
-
|
452 |
-
def check_site_dir(self): # noqa: C901 # is too complex (12) # FIXME
|
453 |
-
"""Verify that self.install_dir is .pth-capable dir, if needed"""
|
454 |
-
|
455 |
-
instdir = normalize_path(self.install_dir)
|
456 |
-
pth_file = os.path.join(instdir, 'easy-install.pth')
|
457 |
-
|
458 |
-
if not os.path.exists(instdir):
|
459 |
-
try:
|
460 |
-
os.makedirs(instdir)
|
461 |
-
except (OSError, IOError):
|
462 |
-
self.cant_write_to_target()
|
463 |
-
|
464 |
-
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
|
465 |
-
is_site_dir = instdir in self.all_site_dirs
|
466 |
-
|
467 |
-
if not is_site_dir and not self.multi_version:
|
468 |
-
# No? Then directly test whether it does .pth file processing
|
469 |
-
is_site_dir = self.check_pth_processing()
|
470 |
-
else:
|
471 |
-
# make sure we can write to target dir
|
472 |
-
testfile = self.pseudo_tempname() + '.write-test'
|
473 |
-
test_exists = os.path.exists(testfile)
|
474 |
-
try:
|
475 |
-
if test_exists:
|
476 |
-
os.unlink(testfile)
|
477 |
-
open(testfile, 'w').close()
|
478 |
-
os.unlink(testfile)
|
479 |
-
except (OSError, IOError):
|
480 |
-
self.cant_write_to_target()
|
481 |
-
|
482 |
-
if not is_site_dir and not self.multi_version:
|
483 |
-
# Can't install non-multi to non-site dir with easy_install
|
484 |
-
pythonpath = os.environ.get('PYTHONPATH', '')
|
485 |
-
log.warn(self.__no_default_msg, self.install_dir, pythonpath)
|
486 |
-
|
487 |
-
if is_site_dir:
|
488 |
-
if self.pth_file is None:
|
489 |
-
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
|
490 |
-
else:
|
491 |
-
self.pth_file = None
|
492 |
-
|
493 |
-
if self.multi_version and not os.path.exists(pth_file):
|
494 |
-
self.pth_file = None # don't create a .pth file
|
495 |
-
self.install_dir = instdir
|
496 |
-
|
497 |
-
__cant_write_msg = textwrap.dedent("""
|
498 |
-
        can't create or remove files in install directory

        The following error occurred while trying to add or remove files in the
        installation directory:

            %s

        The installation directory you specified (via --install-dir, --prefix, or
        the distutils default setting) was:

            %s
        """).lstrip()  # noqa

    __not_exists_id = textwrap.dedent("""
        This directory does not currently exist.  Please create it and try again, or
        choose a different installation directory (using the -d or --install-dir
        option).
        """).lstrip()  # noqa

    __access_msg = textwrap.dedent("""
        Perhaps your account does not have write access to this directory?  If the
        installation directory is a system-owned directory, you may need to sign in
        as the administrator or "root" account.  If you do not have administrative
        access to this machine, you may wish to choose a different installation
        directory, preferably one that is listed in your PYTHONPATH environment
        variable.

        For information on other options, you may wish to consult the
        documentation at:

          https://setuptools.pypa.io/en/latest/deprecated/easy_install.html

        Please make the appropriate changes for your system and try again.
        """).lstrip()  # noqa

    def cant_write_to_target(self):
        msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)

        if not os.path.exists(self.install_dir):
            msg += '\n' + self.__not_exists_id
        else:
            msg += '\n' + self.__access_msg
        raise DistutilsError(msg)

    def check_pth_processing(self):
        """Empirically verify whether .pth files are supported in inst. dir"""
        instdir = self.install_dir
        log.info("Checking .pth file support in %s", instdir)
        pth_file = self.pseudo_tempname() + ".pth"
        ok_file = pth_file + '.ok'
        ok_exists = os.path.exists(ok_file)
        tmpl = _one_liner("""
            import os
            f = open({ok_file!r}, 'w')
            f.write('OK')
            f.close()
            """) + '\n'
        try:
            if ok_exists:
                os.unlink(ok_file)
            dirname = os.path.dirname(ok_file)
            os.makedirs(dirname, exist_ok=True)
            f = open(pth_file, 'w')
        except (OSError, IOError):
            self.cant_write_to_target()
        else:
            try:
                f.write(tmpl.format(**locals()))
                f.close()
                f = None
                executable = sys.executable
                if os.name == 'nt':
                    dirname, basename = os.path.split(executable)
                    alt = os.path.join(dirname, 'pythonw.exe')
                    use_alt = (
                        basename.lower() == 'python.exe' and
                        os.path.exists(alt)
                    )
                    if use_alt:
                        # use pythonw.exe to avoid opening a console window
                        executable = alt

                from distutils.spawn import spawn

                spawn([executable, '-E', '-c', 'pass'], 0)

                if os.path.exists(ok_file):
                    log.info(
                        "TEST PASSED: %s appears to support .pth files",
                        instdir
                    )
                    return True
            finally:
                if f:
                    f.close()
                if os.path.exists(ok_file):
                    os.unlink(ok_file)
                if os.path.exists(pth_file):
                    os.unlink(pth_file)
        if not self.multi_version:
            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
        return False
    def install_egg_scripts(self, dist):
        """Write all the scripts for `dist`, unless scripts are excluded"""
        if not self.exclude_scripts and dist.metadata_isdir('scripts'):
            for script_name in dist.metadata_listdir('scripts'):
                if dist.metadata_isdir('scripts/' + script_name):
                    # The "script" is a directory, likely a Python 3
                    # __pycache__ directory, so skip it.
                    continue
                self.install_script(
                    dist, script_name,
                    dist.get_metadata('scripts/' + script_name)
                )
        self.install_wrapper_scripts(dist)

    def add_output(self, path):
        if os.path.isdir(path):
            for base, dirs, files in os.walk(path):
                for filename in files:
                    self.outputs.append(os.path.join(base, filename))
        else:
            self.outputs.append(path)

    def not_editable(self, spec):
        if self.editable:
            raise DistutilsArgError(
                "Invalid argument %r: you can't use filenames or URLs "
                "with --editable (except via the --find-links option)."
                % (spec,)
            )

    def check_editable(self, spec):
        if not self.editable:
            return

        if os.path.exists(os.path.join(self.build_directory, spec.key)):
            raise DistutilsArgError(
                "%r already exists in %s; can't do a checkout there" %
                (spec.key, self.build_directory)
            )

    @contextlib.contextmanager
    def _tmpdir(self):
        tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
        try:
            # cast to str as workaround for #709 and #710 and #712
            yield str(tmpdir)
        finally:
            os.path.exists(tmpdir) and rmtree(tmpdir)

    def easy_install(self, spec, deps=False):
        with self._tmpdir() as tmpdir:
            if not isinstance(spec, Requirement):
                if URL_SCHEME(spec):
                    # It's a url, download it to tmpdir and process
                    self.not_editable(spec)
                    dl = self.package_index.download(spec, tmpdir)
                    return self.install_item(None, dl, tmpdir, deps, True)

                elif os.path.exists(spec):
                    # Existing file or directory, just process it directly
                    self.not_editable(spec)
                    return self.install_item(None, spec, tmpdir, deps, True)
                else:
                    spec = parse_requirement_arg(spec)

            self.check_editable(spec)
            dist = self.package_index.fetch_distribution(
                spec, tmpdir, self.upgrade, self.editable,
                not self.always_copy, self.local_index
            )
            if dist is None:
                msg = "Could not find suitable distribution for %r" % spec
                if self.always_copy:
                    msg += " (--always-copy skips system and development eggs)"
                raise DistutilsError(msg)
            elif dist.precedence == DEVELOP_DIST:
                # .egg-info dists don't need installing, just process deps
                self.process_distribution(spec, dist, deps, "Using")
                return dist
            else:
                return self.install_item(spec, dist.location, tmpdir, deps)

    def install_item(self, spec, download, tmpdir, deps, install_needed=False):

        # Installation is also needed if file in tmpdir or is not an egg
        install_needed = install_needed or self.always_copy
        install_needed = install_needed or os.path.dirname(download) == tmpdir
        install_needed = install_needed or not download.endswith('.egg')
        install_needed = install_needed or (
            self.always_copy_from is not None and
            os.path.dirname(normalize_path(download)) ==
            normalize_path(self.always_copy_from)
        )

        if spec and not install_needed:
            # at this point, we know it's a local .egg, we just don't know if
            # it's already installed.
            for dist in self.local_index[spec.project_name]:
                if dist.location == download:
                    break
            else:
                install_needed = True  # it's not in the local index

        log.info("Processing %s", os.path.basename(download))

        if install_needed:
            dists = self.install_eggs(spec, download, tmpdir)
            for dist in dists:
                self.process_distribution(spec, dist, deps)
        else:
            dists = [self.egg_distribution(download)]
            self.process_distribution(spec, dists[0], deps, "Using")

        if spec is not None:
            for dist in dists:
                if dist in spec:
                    return dist

    def select_scheme(self, name):
        try:
            install._select_scheme(self, name)
        except AttributeError:
            # stdlib distutils
            install.install.select_scheme(self, name.replace('posix', 'unix'))

    # FIXME: 'easy_install.process_distribution' is too complex (12)
    def process_distribution(  # noqa: C901
            self, requirement, dist, deps=True, *info,
    ):
        self.update_pth(dist)
        self.package_index.add(dist)
        if dist in self.local_index[dist.key]:
            self.local_index.remove(dist)
        self.local_index.add(dist)
        self.install_egg_scripts(dist)
        self.installed_projects[dist.key] = dist
        log.info(self.installation_report(requirement, dist, *info))
        if (dist.has_metadata('dependency_links.txt') and
                not self.no_find_links):
            self.package_index.add_find_links(
                dist.get_metadata_lines('dependency_links.txt')
            )
        if not deps and not self.always_copy:
            return
        elif requirement is not None and dist.key != requirement.key:
            log.warn("Skipping dependencies for %s", dist)
            return  # XXX this is not the distribution we were looking for
        elif requirement is None or dist not in requirement:
            # if we wound up with a different version, resolve what we've got
            distreq = dist.as_requirement()
            requirement = Requirement(str(distreq))
        log.info("Processing dependencies for %s", requirement)
        try:
            distros = WorkingSet([]).resolve(
                [requirement], self.local_index, self.easy_install
            )
        except DistributionNotFound as e:
            raise DistutilsError(str(e)) from e
        except VersionConflict as e:
            raise DistutilsError(e.report()) from e
        if self.always_copy or self.always_copy_from:
            # Force all the relevant distros to be copied or activated
            for dist in distros:
                if dist.key not in self.installed_projects:
                    self.easy_install(dist.as_requirement())
        log.info("Finished processing dependencies for %s", requirement)

    def should_unzip(self, dist):
        if self.zip_ok is not None:
            return not self.zip_ok
        if dist.has_metadata('not-zip-safe'):
            return True
        if not dist.has_metadata('zip-safe'):
            return True
        return False
    def maybe_move(self, spec, dist_filename, setup_base):
        dst = os.path.join(self.build_directory, spec.key)
        if os.path.exists(dst):
            msg = (
                "%r already exists in %s; build directory %s will not be kept"
            )
            log.warn(msg, spec.key, self.build_directory, setup_base)
            return setup_base
        if os.path.isdir(dist_filename):
            setup_base = dist_filename
        else:
            if os.path.dirname(dist_filename) == setup_base:
                os.unlink(dist_filename)  # get it out of the tmp dir
            contents = os.listdir(setup_base)
            if len(contents) == 1:
                dist_filename = os.path.join(setup_base, contents[0])
                if os.path.isdir(dist_filename):
                    # if the only thing there is a directory, move it instead
                    setup_base = dist_filename
        ensure_directory(dst)
        shutil.move(setup_base, dst)
        return dst

    def install_wrapper_scripts(self, dist):
        if self.exclude_scripts:
            return
        for args in ScriptWriter.best().get_args(dist):
            self.write_script(*args)

    def install_script(self, dist, script_name, script_text, dev_path=None):
        """Generate a legacy script wrapper and install it"""
        spec = str(dist.as_requirement())
        is_script = is_python_script(script_text, script_name)

        if is_script:
            body = self._load_template(dev_path) % locals()
            script_text = ScriptWriter.get_header(script_text) + body
        self.write_script(script_name, _to_bytes(script_text), 'b')

    @staticmethod
    def _load_template(dev_path):
        """
        There are a couple of template scripts in the package. This
        function loads one of them and prepares it for use.
        """
        # See https://github.com/pypa/setuptools/issues/134 for info
        # on script file naming and downstream issues with SVR4
        name = 'script.tmpl'
        if dev_path:
            name = name.replace('.tmpl', ' (dev).tmpl')

        raw_bytes = resource_string('setuptools', name)
        return raw_bytes.decode('utf-8')

    def write_script(self, script_name, contents, mode="t", blockers=()):
        """Write an executable file to the scripts directory"""
        self.delete_blockers(  # clean up old .py/.pyw w/o a script
            [os.path.join(self.script_dir, x) for x in blockers]
        )
        log.info("Installing %s script to %s", script_name, self.script_dir)
        target = os.path.join(self.script_dir, script_name)
        self.add_output(target)

        if self.dry_run:
            return

        mask = current_umask()
        ensure_directory(target)
        if os.path.exists(target):
            os.unlink(target)
        with open(target, "w" + mode) as f:
            f.write(contents)
        chmod(target, 0o777 - mask)

    def install_eggs(self, spec, dist_filename, tmpdir):
        # .egg dirs or files are already built, so just return them
        installer_map = {
            '.egg': self.install_egg,
            '.exe': self.install_exe,
            '.whl': self.install_wheel,
        }
        try:
            install_dist = installer_map[
                dist_filename.lower()[-4:]
            ]
        except KeyError:
            pass
        else:
            return [install_dist(dist_filename, tmpdir)]

        # Anything else, try to extract and build
        setup_base = tmpdir
        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
        elif os.path.isdir(dist_filename):
            setup_base = os.path.abspath(dist_filename)

        if (setup_base.startswith(tmpdir)  # something we downloaded
                and self.build_directory and spec is not None):
            setup_base = self.maybe_move(spec, dist_filename, setup_base)

        # Find the setup.py file
        setup_script = os.path.join(setup_base, 'setup.py')

        if not os.path.exists(setup_script):
            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
            if not setups:
                raise DistutilsError(
                    "Couldn't find a setup script in %s" %
                    os.path.abspath(dist_filename)
                )
            if len(setups) > 1:
                raise DistutilsError(
                    "Multiple setup scripts in %s" %
                    os.path.abspath(dist_filename)
                )
            setup_script = setups[0]

        # Now run it, and return the result
        if self.editable:
            log.info(self.report_editable(spec, setup_script))
            return []
        else:
            return self.build_and_install(setup_script, setup_base)

    def egg_distribution(self, egg_path):
        if os.path.isdir(egg_path):
            metadata = PathMetadata(egg_path, os.path.join(egg_path,
                                                           'EGG-INFO'))
        else:
            metadata = EggMetadata(zipimport.zipimporter(egg_path))
        return Distribution.from_filename(egg_path, metadata=metadata)
    # FIXME: 'easy_install.install_egg' is too complex (11)
    def install_egg(self, egg_path, tmpdir):  # noqa: C901
        destination = os.path.join(
            self.install_dir,
            os.path.basename(egg_path),
        )
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)

        dist = self.egg_distribution(egg_path)
        if not (
                os.path.exists(destination) and os.path.samefile(egg_path, destination)
        ):
            if os.path.isdir(destination) and not os.path.islink(destination):
                dir_util.remove_tree(destination, dry_run=self.dry_run)
            elif os.path.exists(destination):
                self.execute(
                    os.unlink,
                    (destination,),
                    "Removing " + destination,
                )
            try:
                new_dist_is_zipped = False
                if os.path.isdir(egg_path):
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copytree, "Copying"
                elif self.should_unzip(dist):
                    self.mkpath(destination)
                    f, m = self.unpack_and_compile, "Extracting"
                else:
                    new_dist_is_zipped = True
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copy2, "Copying"
                self.execute(
                    f,
                    (egg_path, destination),
                    (m + " %s to %s") % (
                        os.path.basename(egg_path),
                        os.path.dirname(destination)
                    ),
                )
                update_dist_caches(
                    destination,
                    fix_zipimporter_caches=new_dist_is_zipped,
                )
            except Exception:
                update_dist_caches(destination, fix_zipimporter_caches=False)
                raise

        self.add_output(destination)
        return self.egg_distribution(destination)

    def install_exe(self, dist_filename, tmpdir):
        # See if it's valid, get data
        cfg = extract_wininst_cfg(dist_filename)
        if cfg is None:
            raise DistutilsError(
                "%s is not a valid distutils Windows .exe" % dist_filename
            )
        # Create a dummy distribution object until we build the real distro
        dist = Distribution(
            None,
            project_name=cfg.get('metadata', 'name'),
            version=cfg.get('metadata', 'version'), platform=get_platform(),
        )

        # Convert the .exe to an unpacked egg
        egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
        dist.location = egg_path
        egg_tmp = egg_path + '.tmp'
        _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
        pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
        ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
        dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
        self.exe_to_egg(dist_filename, egg_tmp)

        # Write EGG-INFO/PKG-INFO
        if not os.path.exists(pkg_inf):
            f = open(pkg_inf, 'w')
            f.write('Metadata-Version: 1.0\n')
            for k, v in cfg.items('metadata'):
                if k != 'target_version':
                    f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
            f.close()
        script_dir = os.path.join(_egg_info, 'scripts')
        # delete entry-point scripts to avoid duping
        self.delete_blockers([
            os.path.join(script_dir, args[0])
            for args in ScriptWriter.get_args(dist)
        ])
        # Build .egg file from tmpdir
        bdist_egg.make_zipfile(
            egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
        )
        # install the .egg
        return self.install_egg(egg_path, tmpdir)

    # FIXME: 'easy_install.exe_to_egg' is too complex (12)
    def exe_to_egg(self, dist_filename, egg_tmp):  # noqa: C901
        """Extract a bdist_wininst to the directories an egg would use"""
        # Check for .pth file and set up prefix translations
        prefixes = get_exe_prefixes(dist_filename)
        to_compile = []
        native_libs = []
        top_level = {}

        def process(src, dst):
            s = src.lower()
            for old, new in prefixes:
                if s.startswith(old):
                    src = new + src[len(old):]
                    parts = src.split('/')
                    dst = os.path.join(egg_tmp, *parts)
                    dl = dst.lower()
                    if dl.endswith('.pyd') or dl.endswith('.dll'):
                        parts[-1] = bdist_egg.strip_module(parts[-1])
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        native_libs.append(src)
                    elif dl.endswith('.py') and old != 'SCRIPTS/':
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        to_compile.append(dst)
                    return dst
            if not src.endswith('.pth'):
                log.warn("WARNING: can't process %s", src)
            return None

        # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
        unpack_archive(dist_filename, egg_tmp, process)
        stubs = []
        for res in native_libs:
            if res.lower().endswith('.pyd'):  # create stubs for .pyd's
                parts = res.split('/')
                resource = parts[-1]
                parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
                pyfile = os.path.join(egg_tmp, *parts)
                to_compile.append(pyfile)
                stubs.append(pyfile)
                bdist_egg.write_stub(resource, pyfile)
        self.byte_compile(to_compile)  # compile .py's
        bdist_egg.write_safety_flag(
            os.path.join(egg_tmp, 'EGG-INFO'),
            bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag

        for name in 'top_level', 'native_libs':
            if locals()[name]:
                txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
                if not os.path.exists(txt):
                    f = open(txt, 'w')
                    f.write('\n'.join(locals()[name]) + '\n')
                    f.close()
    def install_wheel(self, wheel_path, tmpdir):
        wheel = Wheel(wheel_path)
        assert wheel.is_compatible()
        destination = os.path.join(self.install_dir, wheel.egg_name())
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)
        if os.path.isdir(destination) and not os.path.islink(destination):
            dir_util.remove_tree(destination, dry_run=self.dry_run)
        elif os.path.exists(destination):
            self.execute(
                os.unlink,
                (destination,),
                "Removing " + destination,
            )
        try:
            self.execute(
                wheel.install_as_egg,
                (destination,),
                ("Installing %s to %s") % (
                    os.path.basename(wheel_path),
                    os.path.dirname(destination)
                ),
            )
        finally:
            update_dist_caches(destination, fix_zipimporter_caches=False)
        self.add_output(destination)
        return self.egg_distribution(destination)

    __mv_warning = textwrap.dedent("""
        Because this distribution was installed --multi-version, before you can
        import modules from this package in an application, you will need to
        'import pkg_resources' and then use a 'require()' call similar to one of
        these examples, in order to select the desired version:

            pkg_resources.require("%(name)s")  # latest installed version
            pkg_resources.require("%(name)s==%(version)s")  # this exact version
            pkg_resources.require("%(name)s>=%(version)s")  # this version or higher
        """).lstrip()  # noqa

    __id_warning = textwrap.dedent("""
        Note also that the installation directory must be on sys.path at runtime for
        this to work.  (e.g. by being the application's script directory, by being on
        PYTHONPATH, or by being added to sys.path by your code.)
        """)  # noqa

    def installation_report(self, req, dist, what="Installed"):
        """Helpful installation message for display to package users"""
        msg = "\n%(what)s %(eggloc)s%(extras)s"
        if self.multi_version and not self.no_report:
            msg += '\n' + self.__mv_warning
            if self.install_dir not in map(normalize_path, sys.path):
                msg += '\n' + self.__id_warning

        eggloc = dist.location
        name = dist.project_name
        version = dist.version
        extras = ''  # TODO: self.report_extras(req, dist)
        return msg % locals()

    __editable_msg = textwrap.dedent("""
        Extracted editable version of %(spec)s to %(dirname)s

        If it uses setuptools in its setup script, you can activate it in
        "development" mode by going to that directory and running::

            %(python)s setup.py develop

        See the setuptools documentation for the "develop" command for more info.
        """).lstrip()  # noqa

    def report_editable(self, spec, setup_script):
        dirname = os.path.dirname(setup_script)
        python = sys.executable
        return '\n' + self.__editable_msg % locals()

    def run_setup(self, setup_script, setup_base, args):
        sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
        sys.modules.setdefault('distutils.command.egg_info', egg_info)

        args = list(args)
        if self.verbose > 2:
            v = 'v' * (self.verbose - 1)
            args.insert(0, '-' + v)
        elif self.verbose < 2:
            args.insert(0, '-q')
        if self.dry_run:
            args.insert(0, '-n')
        log.info(
            "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
        )
        try:
            run_setup(setup_script, args)
        except SystemExit as v:
            raise DistutilsError(
                "Setup script exited with %s" % (v.args[0],)
            ) from v

    def build_and_install(self, setup_script, setup_base):
        args = ['bdist_egg', '--dist-dir']

        dist_dir = tempfile.mkdtemp(
            prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
        )
        try:
            self._set_fetcher_options(os.path.dirname(setup_script))
            args.append(dist_dir)

            self.run_setup(setup_script, setup_base, args)
            all_eggs = Environment([dist_dir])
            eggs = []
            for key in all_eggs:
                for dist in all_eggs[key]:
                    eggs.append(self.install_egg(dist.location, setup_base))
            if not eggs and not self.dry_run:
                log.warn("No eggs found in %s (setup script problem?)",
                         dist_dir)
            return eggs
        finally:
            rmtree(dist_dir)
            log.set_verbosity(self.verbose)  # restore our log verbosity
    def _set_fetcher_options(self, base):
        """
        When easy_install is about to run bdist_egg on a source dist, that
        source dist might have 'setup_requires' directives, requiring
        additional fetching. Ensure the fetcher options given to easy_install
        are available to that command as well.
        """
        # find the fetch options from easy_install and write them out
        # to the setup.cfg file.
        ei_opts = self.distribution.get_option_dict('easy_install').copy()
        fetch_directives = (
            'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
        )
        fetch_options = {}
        for key, val in ei_opts.items():
            if key not in fetch_directives:
                continue
            fetch_options[key] = val[1]
        # create a settings dictionary suitable for `edit_config`
        settings = dict(easy_install=fetch_options)
        cfg_filename = os.path.join(base, 'setup.cfg')
        setopt.edit_config(cfg_filename, settings)

    def update_pth(self, dist):  # noqa: C901  # is too complex (11)  # FIXME
        if self.pth_file is None:
            return

        for d in self.pth_file[dist.key]:  # drop old entries
            if not self.multi_version and d.location == dist.location:
                continue

            log.info("Removing %s from easy-install.pth file", d)
            self.pth_file.remove(d)
            if d.location in self.shadow_path:
                self.shadow_path.remove(d.location)

        if not self.multi_version:
            if dist.location in self.pth_file.paths:
                log.info(
                    "%s is already the active version in easy-install.pth",
                    dist,
                )
            else:
                log.info("Adding %s to easy-install.pth file", dist)
                self.pth_file.add(dist)  # add new entry
                if dist.location not in self.shadow_path:
                    self.shadow_path.append(dist.location)

        if self.dry_run:
            return

        self.pth_file.save()

        if dist.key != 'setuptools':
            return

        # Ensure that setuptools itself never becomes unavailable!
        # XXX should this check for latest version?
        filename = os.path.join(self.install_dir, 'setuptools.pth')
        if os.path.islink(filename):
            os.unlink(filename)
        with open(filename, 'wt') as f:
            f.write(self.pth_file.make_relative(dist.location) + '\n')

    def unpack_progress(self, src, dst):
        # Progress filter for unpacking
        log.debug("Unpacking %s to %s", src, dst)
        return dst  # only unpack-and-compile skips files for dry run

    def unpack_and_compile(self, egg_path, destination):
        to_compile = []
        to_chmod = []

        def pf(src, dst):
            if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
                to_compile.append(dst)
            elif dst.endswith('.dll') or dst.endswith('.so'):
                to_chmod.append(dst)
            self.unpack_progress(src, dst)
            return not self.dry_run and dst or None

        unpack_archive(egg_path, destination, pf)
        self.byte_compile(to_compile)
        if not self.dry_run:
            for f in to_chmod:
                mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
                chmod(f, mode)

    def byte_compile(self, to_compile):
        if sys.dont_write_bytecode:
            return

        from distutils.util import byte_compile

        try:
            # try to make the byte compile messages quieter
            log.set_verbosity(self.verbose - 1)

            byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
            if self.optimize:
                byte_compile(
                    to_compile, optimize=self.optimize, force=1,
                    dry_run=self.dry_run,
                )
        finally:
            log.set_verbosity(self.verbose)  # restore original verbosity

    __no_default_msg = textwrap.dedent("""
        bad install directory or PYTHONPATH

        You are attempting to install a package to a directory that is not
        on PYTHONPATH and which Python does not read ".pth" files from.  The
        installation directory you specified (via --install-dir, --prefix, or
        the distutils default setting) was:

            %s

        and your PYTHONPATH environment variable currently contains:

            %r

        Here are some of your options for correcting the problem:

        * You can choose a different installation directory, i.e., one that is
          on PYTHONPATH or supports .pth files

        * You can add the installation directory to the PYTHONPATH environment
          variable.  (It must then also be on PYTHONPATH whenever you run
          Python and want to use the package(s) you are installing.)

        * You can set up the installation directory to support ".pth" files by
          using one of the approaches described here:

          https://setuptools.pypa.io/en/latest/deprecated/easy_install.html#custom-installation-locations


        Please make the appropriate changes for your system and try again.
        """).strip()

    def create_home_path(self):
        """Create directories under ~."""
        if not self.user:
            return
        home = convert_path(os.path.expanduser("~"))
        for path in only_strs(self.config_vars.values()):
            if path.startswith(home) and not os.path.isdir(path):
                self.debug_print("os.makedirs('%s', 0o700)" % path)
                os.makedirs(path, 0o700)
    INSTALL_SCHEMES = dict(
        posix=dict(
            install_dir='$base/lib/python$py_version_short/site-packages',
            script_dir='$base/bin',
        ),
    )

    DEFAULT_SCHEME = dict(
        install_dir='$base/Lib/site-packages',
        script_dir='$base/Scripts',
    )

    def _expand(self, *attrs):
        config_vars = self.get_finalized_command('install').config_vars

        if self.prefix:
            # Set default install_dir/scripts from --prefix
            config_vars = dict(config_vars)
            config_vars['base'] = self.prefix
            scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
            for attr, val in scheme.items():
                if getattr(self, attr, None) is None:
                    setattr(self, attr, val)

        from distutils.util import subst_vars

        for attr in attrs:
            val = getattr(self, attr)
            if val is not None:
                val = subst_vars(val, config_vars)
                if os.name == 'posix':
                    val = os.path.expanduser(val)
                setattr(self, attr, val)

def _pythonpath():
    items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
    return filter(None, items)


def get_site_dirs():
    """
    Return a list of 'site' dirs
    """

    sitedirs = []

    # start with PYTHONPATH
    sitedirs.extend(_pythonpath())

    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        if not prefix:
            continue

        if sys.platform in ('os2emx', 'riscos'):
            sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
        elif os.sep == '/':
            sitedirs.extend([
                os.path.join(
                    prefix,
                    "lib",
                    "python{}.{}".format(*sys.version_info),
                    "site-packages",
                ),
                os.path.join(prefix, "lib", "site-python"),
            ])
        else:
            sitedirs.extend([
                prefix,
                os.path.join(prefix, "lib", "site-packages"),
            ])
        if sys.platform != 'darwin':
            continue

        # for framework builds *only* we add the standard Apple
        # locations. Currently only per-user, but /Library and
        # /Network/Library could be added too
        if 'Python.framework' not in prefix:
            continue

        home = os.environ.get('HOME')
        if not home:
            continue

        home_sp = os.path.join(
            home,
            'Library',
            'Python',
            '{}.{}'.format(*sys.version_info),
            'site-packages',
        )
        sitedirs.append(home_sp)
    lib_paths = get_path('purelib'), get_path('platlib')

    sitedirs.extend(s for s in lib_paths if s not in sitedirs)

    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)

    with contextlib.suppress(AttributeError):
        sitedirs.extend(site.getsitepackages())

    sitedirs = list(map(normalize_path, sitedirs))

    return sitedirs

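
# --- Illustrative sketch (not part of the original module): a quick way to
# see which directories get_site_dirs() above would consider. The helper
# name `_demo_print_site_dirs` is hypothetical.
def _demo_print_site_dirs():
    # each entry is already normalized via normalize_path()
    for d in get_site_dirs():
        print(d)
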
def expand_paths(inputs):  # noqa: C901  # is too complex (11)  # FIXME
    """Yield sys.path directories that might contain "old-style" packages"""

    seen = {}

    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue

        seen[dirname] = 1
        if not os.path.isdir(dirname):
            continue

        files = os.listdir(dirname)
        yield dirname, files

        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue

            # Read the .pth file
            f = open(os.path.join(dirname, name))
            lines = list(yield_lines(f))
            f.close()

            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if line.startswith("import"):
                    continue

                line = normalize_path(line.rstrip())
                if line in seen:
                    continue

                seen[line] = 1
                if not os.path.isdir(line):
                    continue

                yield line, os.listdir(line)

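
# --- Illustrative sketch (hypothetical helper, not in the original file):
# expand_paths() yields (directory, file-list) pairs for the given inputs and
# for directories referenced from third-party .pth files. A minimal probe for
# legacy .egg-info entries on sys.path might look like this:
def _demo_scan_old_style_packages():
    for dirname, files in expand_paths(sys.path):
        legacy = [name for name in files if name.endswith('.egg-info')]
        if legacy:
            print(dirname, legacy)
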
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a configparser.RawConfigParser, or None
    """
    f = open(dist_filename, 'rb')
    try:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None

        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)

        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag

        f.seek(prepended - (12 + cfglen))
        init = {'version': '', 'target_version': ''}
        cfg = configparser.RawConfigParser(init)
        try:
            part = f.read(cfglen)
            # Read up to the first null byte.
            config = part.split(b'\0', 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            # be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            cfg.read_file(io.StringIO(config))
        except configparser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg

    finally:
        f.close()

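
# --- Illustrative sketch (hypothetical, not in the original file): reading
# the embedded metadata of a bdist_wininst installer with the function above.
# 'pkg-1.0.win32.exe' is a placeholder path.
def _demo_read_wininst_metadata(path='pkg-1.0.win32.exe'):
    cfg = extract_wininst_cfg(path)
    if cfg is None:
        print('not a bdist_wininst installer')
    else:
        # RawConfigParser API: get(section, option)
        print(cfg.get('metadata', 'name'), cfg.get('metadata', 'version'))
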
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file"""

    prefixes = [
        ('PURELIB/', ''),
        ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    z = zipfile.ZipFile(exe_filename)
    try:
        for info in z.infolist():
            name = info.filename
            parts = name.split('/')
            if len(parts) == 3 and parts[2] == 'PKG-INFO':
                if parts[1].endswith('.egg-info'):
                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
                    break
            if len(parts) != 2 or not name.endswith('.pth'):
                continue
            if name.endswith('-nspkg.pth'):
                continue
            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
                contents = z.read(name).decode()
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
    finally:
        z.close()
    prefixes = [(x.lower(), y) for x, y in prefixes]
    prefixes.sort()
    prefixes.reverse()
    return prefixes

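
# --- Illustrative sketch (hypothetical, not in the original file): dumping
# the exe->egg path translations that get_exe_prefixes() computes for an
# installer. The path is a placeholder.
def _demo_show_exe_prefixes(path='pkg-1.0.win32.exe'):
    # each pair maps a lower-cased archive prefix to its egg-relative prefix
    for old, new in get_exe_prefixes(path):
        print('%-30s -> %s' % (old, new))
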
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    dirty = False

    def __init__(self, filename, sitedirs=()):
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        super().__init__([], None, None)
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                if not path.strip() or path.strip().startswith('#'):
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()

        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return

        rel_paths = list(map(self.make_relative, self.paths))
        if rel_paths:
            log.debug("Saving %s", self.filename)
            lines = self._wrap_lines(rel_paths)
            data = '\n'.join(lines) + '\n'

            if os.path.islink(self.filename):
                os.unlink(self.filename)
            with open(self.filename, 'wt') as f:
                f.write(data)

        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)

        self.dirty = False

    @staticmethod
    def _wrap_lines(lines):
        return lines

    def add(self, dist):
        """Add `dist` to the distribution map"""
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        super().add(dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        super().remove(dist)

    def make_relative(self, path):
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            return path

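
# --- Illustrative sketch (hypothetical usage, not in the original file):
# PthDistributions wraps an easy-install.pth file; add()/remove() mark it
# dirty, and save() rewrites the file (or deletes it when empty). The paths
# below are placeholders.
def _demo_pth_roundtrip():
    pth = PthDistributions('/tmp/site/easy-install.pth')
    for dist in find_distributions('/tmp/site/example-1.0-py3.9.egg'):
        pth.add(dist)   # appends the egg's path if it is not already listed
    pth.save()          # writes only when pth.dirty is True
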
class RewritePthDistributions(PthDistributions):
    @classmethod
    def _wrap_lines(cls, lines):
        yield cls.prelude
        for line in lines:
            yield line
        yield cls.postlude

    prelude = _one_liner("""
        import sys
        sys.__plen = len(sys.path)
        """)
    postlude = _one_liner("""
        import sys
        new = sys.path[sys.__plen:]
        del sys.path[sys.__plen:]
        p = getattr(sys, '__egginsert', 0)
        sys.path[p:p] = new
        sys.__egginsert = p + len(new)
        """)


if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
    PthDistributions = RewritePthDistributions

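
# --- Illustrative note (not in the original file): with the 'rewrite'
# technique enabled, a saved easy-install.pth would look roughly like:
#
#     import sys; sys.__plen = len(sys.path)
#     ./example-1.0-py3.9.egg
#     ./other-2.0-py3.9.egg
#     import sys; new = sys.path[sys.__plen:]; del sys.path[sys.__plen:]; ...
#
# so entries added by the .pth file are moved to a fixed insertion point in
# sys.path rather than staying appended at the end.
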
def _first_line_re():
    """
    Return a regular expression based on first_line_re suitable for matching
    strings.
    """
    if isinstance(first_line_re.pattern, str):
        return first_line_re

    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
    return re.compile(first_line_re.pattern.decode())


def auto_chmod(func, arg, exc):
    if func in [os.unlink, os.remove] and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, _ = sys.exc_info()
    # TODO: This code doesn't make sense. What is it trying to do?
    raise (ev[0], ev[1] + (" %s %s" % (func, arg)))

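
# --- Illustrative sketch (hypothetical, not in the original file): auto_chmod
# matches shutil.rmtree's onerror signature (function, path, excinfo), so it
# can clear the read-only bit on Windows and retry a failed unlink.
# '/tmp/build' is a placeholder path.
def _demo_force_rmtree(path='/tmp/build'):
    shutil.rmtree(path, onerror=auto_chmod)
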
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix any globally cached `dist_path` related data

    `dist_path` should be a path of a newly installed egg distribution (zipped
    or unzipped).

    sys.path_importer_cache contains finder objects that have been cached when
    importing data from the original distribution. Any such finders need to be
    cleared since the replacement distribution might be packaged differently,
    e.g. a zipped egg distribution might get replaced with an unzipped egg
    folder or vice versa. Having the old finders cached may then cause Python
    to attempt loading modules from the replacement distribution using an
    incorrect loader.

    zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution are left behind, they can fail to load modules from
    the replacement distribution. E.g. if an old zipimport.zipimporter instance
    is used to load data from a new zipped egg archive, it may cause the
    operation to attempt to locate the requested data in the wrong location -
    one indicated by the original distribution's zip archive directory
    information. Such an operation may then fail outright, e.g. report having
    read a 'bad local file header', or even worse, it may fail silently &
    return invalid data.

    zipimport._zip_directory_cache contains cached zip archive directory
    information for all existing zipimport.zipimporter instances and all such
    instances connected to the same archive share the same cached directory
    information.

    If asked, and the underlying Python implementation allows it, we can fix
    all existing zipimport.zipimporter instances instead of having to track
    them down and remove them one by one, by updating their shared cached zip
    archive directory information. This, of course, assumes that the
    replacement distribution is packaged as a zipped egg.

    If not asked to fix existing zipimport.zipimporter instances, we still do
    our best to clear any remaining zipimport.zipimporter related cached data
    that might somehow later get used when attempting to load data from the new
    distribution and thus cause such load operations to fail. Note that when
    tracking down such remaining stale data, we can not catch every conceivable
    usage from here, and we clear only those that we know of and have found to
    cause problems if left alive. Any remaining caches should be updated by
    whomever is in charge of maintaining them, i.e. they should be ready to
    handle us replacing their zip archives with new distributions at runtime.

    """
    # There are several other known sources of stale zipimport.zipimporter
    # instances that we do not clear here, but might if ever given a reason to
    # do so:
    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
    #   set') may contain distributions which may in turn contain their
    #   zipimport.zipimporter loaders.
    # * Several zipimport.zipimporter loaders held by local variables further
    #   up the function call stack when running the setuptools installation.
    # * Already loaded modules may have their __loader__ attribute set to the
    #   exact loader instance used when importing them. Python 3.4 docs state
    #   that this information is intended mostly for introspection and so is
    #   not expected to cause us problems.
    normalized_path = normalize_path(dist_path)
    _uncache(normalized_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(normalized_path)
    else:
        # Here, even though we do not want to fix existing and now stale
        # zipimporter cache information, we still want to remove it. Related to
        # Python's zip archive directory information cache, we clear each of
        # its stale entries in two phases:
        #   1. Clear the entry so attempting to access zip archive information
        #      via any existing stale zipimport.zipimporter instances fails.
        #   2. Remove the entry from the cache so any newly constructed
        #      zipimport.zipimporter instances do not end up using old stale
        #      zip archive directory information.
        # This whole stale data removal step does not seem strictly necessary,
        # but has been left in because it was done before we started replacing
        # the zip archive directory information cache content if possible, and
        # there are no relevant unit tests that we can depend on to tell us if
        # this is really needed.
        _remove_and_clear_zip_directory_cache_data(normalized_path)


def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return zipimporter cache entry keys related to a given normalized path.

    Alternative path spellings (e.g. those using different character case or
    those using alternative path separators) related to the same path are
    included. Any sub-path entries are included as well, i.e. those
    corresponding to zip archives embedded in other zip archives.

    """
    result = []
    prefix_len = len(normalized_path)
    for p in cache:
        np = normalize_path(p)
        if (np.startswith(normalized_path) and
                np[prefix_len:prefix_len + 1] in (os.sep, '')):
            result.append(p)
    return result


def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path.

    Any sub-path entries are processed as well, i.e. those corresponding to zip
    archives embedded in other zip archives.

    Given updater is a callable taking a cache entry key and the original entry
    (after already removing the entry from the cache), and expected to update
    the entry and possibly return a new one to be inserted in its place.
    Returning None indicates that the entry should not be replaced with a new
    one. If no updater is given, the cache entries are simply removed without
    any additional processing, the same as if the updater simply returned None.

    """
    for p in _collect_zipimporter_cache_entries(normalized_path, cache):
        # N.B. pypy's custom zipimport._zip_directory_cache implementation does
        # not support the complete dict interface:
        # * Does not support item assignment, thus not allowing this function
        #   to be used only for removing existing cache entries.
        # * Does not support the dict.pop() method, forcing us to use the
        #   get/del patterns instead. For more detailed information see the
        #   following links:
        #   https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
        #   http://bit.ly/2h9itJX
        old_entry = cache[p]
        del cache[p]
        new_entry = updater and updater(p, old_entry)
        if new_entry is not None:
            cache[p] = new_entry


def _uncache(normalized_path, cache):
    _update_zipimporter_cache(normalized_path, cache)


def _remove_and_clear_zip_directory_cache_data(normalized_path):
    def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
        old_entry.clear()

    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache,
        updater=clear_and_remove_cached_zip_archive_directory_data)


# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:

    def _replace_zip_directory_cache_data(normalized_path):
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # N.B. In theory, we could load the zip directory information just
            # once for all updated path spellings, and then copy it locally and
            # update its contained path strings to contain the correct
            # spelling, but that seems like a way too invasive move (this cache
            # structure is not officially documented anywhere and could in
            # theory change with new Python releases) for no significant
            # benefit.
            old_entry.clear()
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            return old_entry

        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)

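
# --- Illustrative sketch (hypothetical, not in the original file): the cache
# maintained above is keyed by archive path. A quick way to see which entries
# would be touched for a given egg:
def _demo_show_cache_entries(egg_path):
    for key in _collect_zipimporter_cache_entries(
            normalize_path(egg_path), zipimport._zip_directory_cache):
        print(key)
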
def is_python(text, filename='<string>'):
    "Is this string a valid Python script?"
    try:
        compile(text, filename, 'exec')
    except (SyntaxError, TypeError):
        return False
    else:
        return True


def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line)"""
    try:
        with io.open(executable, encoding='latin-1') as fp:
            magic = fp.read(2)
    except (OSError, IOError):
        return executable
    return magic == '#!'


def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing rules"""
    return subprocess.list2cmdline([arg])


def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
    """
    if filename.endswith('.py') or filename.endswith('.pyw'):
        return True  # extension says it's Python
    if is_python(script_text, filename):
        return True  # it's syntactically valid Python
    if script_text.startswith('#!'):
        # It begins with a '#!' line, so check if 'python' is in it somewhere
        return 'python' in script_text.splitlines()[0].lower()

    return False  # Not any Python I can recognize

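
# --- Illustrative sketch (hypothetical, not in the original file): the
# detection helpers above combine extension, syntax, and shebang checks.
def _demo_classify_script():
    assert is_python_script("print('hi')\n", 'hello.py')           # by extension
    assert is_python_script("#!/usr/bin/env python\n", 'hello')    # valid Python
    assert not is_python_script('@echo off\nrem batch\n', 'run.bat')
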
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility
    def _chmod(*args):
        pass


def chmod(path, mode):
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as e:
        log.debug("chmod failed: %s", e)

|
1961 |
-
"""
|
1962 |
-
A command spec for a #! header, specified as a list of arguments akin to
|
1963 |
-
those passed to Popen.
|
1964 |
-
"""
|
1965 |
-
|
1966 |
-
options = []
|
1967 |
-
split_args = dict()
|
1968 |
-
|
1969 |
-
@classmethod
|
1970 |
-
def best(cls):
|
1971 |
-
"""
|
1972 |
-
Choose the best CommandSpec class based on environmental conditions.
|
1973 |
-
"""
|
1974 |
-
return cls
|
1975 |
-
|
1976 |
-
@classmethod
|
1977 |
-
def _sys_executable(cls):
|
1978 |
-
_default = os.path.normpath(sys.executable)
|
1979 |
-
return os.environ.get('__PYVENV_LAUNCHER__', _default)
|
1980 |
-
|
1981 |
-
@classmethod
|
1982 |
-
def from_param(cls, param):
|
1983 |
-
"""
|
1984 |
-
Construct a CommandSpec from a parameter to build_scripts, which may
|
1985 |
-
be None.
|
1986 |
-
"""
|
1987 |
-
if isinstance(param, cls):
|
1988 |
-
return param
|
1989 |
-
if isinstance(param, list):
|
1990 |
-
return cls(param)
|
1991 |
-
if param is None:
|
1992 |
-
return cls.from_environment()
|
1993 |
-
# otherwise, assume it's a string.
|
1994 |
-
return cls.from_string(param)
|
1995 |
-
|
1996 |
-
@classmethod
|
1997 |
-
def from_environment(cls):
|
1998 |
-
return cls([cls._sys_executable()])
|
1999 |
-
|
2000 |
-
@classmethod
|
2001 |
-
def from_string(cls, string):
|
2002 |
-
"""
|
2003 |
-
Construct a command spec from a simple string representing a command
|
2004 |
-
line parseable by shlex.split.
|
2005 |
-
"""
|
2006 |
-
items = shlex.split(string, **cls.split_args)
|
2007 |
-
return cls(items)
|
2008 |
-
|
2009 |
-
def install_options(self, script_text):
|
2010 |
-
self.options = shlex.split(self._extract_options(script_text))
|
2011 |
-
cmdline = subprocess.list2cmdline(self)
|
2012 |
-
if not isascii(cmdline):
|
2013 |
-
self.options[:0] = ['-x']
|
2014 |
-
|
2015 |
-
@staticmethod
|
2016 |
-
def _extract_options(orig_script):
|
2017 |
-
"""
|
2018 |
-
Extract any options from the first line of the script.
|
2019 |
-
"""
|
2020 |
-
first = (orig_script + '\n').splitlines()[0]
|
2021 |
-
match = _first_line_re().match(first)
|
2022 |
-
options = match.group(1) or '' if match else ''
|
2023 |
-
return options.strip()
|
2024 |
-
|
2025 |
-
def as_header(self):
|
2026 |
-
return self._render(self + list(self.options))
|
2027 |
-
|
2028 |
-
@staticmethod
|
2029 |
-
def _strip_quotes(item):
|
2030 |
-
_QUOTES = '"\''
|
2031 |
-
for q in _QUOTES:
|
2032 |
-
if item.startswith(q) and item.endswith(q):
|
2033 |
-
return item[1:-1]
|
2034 |
-
return item
|
2035 |
-
|
2036 |
-
@staticmethod
|
2037 |
-
def _render(items):
|
2038 |
-
cmdline = subprocess.list2cmdline(
|
2039 |
-
CommandSpec._strip_quotes(item.strip()) for item in items)
|
2040 |
-
return '#!' + cmdline + '\n'
|
2041 |
-
|
2042 |
-
|
2043 |
-
# For pbr compat; will be removed in a future version.
|
2044 |
-
sys_executable = CommandSpec._sys_executable()
|
2045 |
-
|
2046 |
-
|
2047 |
-
class WindowsCommandSpec(CommandSpec):
|
2048 |
-
split_args = dict(posix=False)
|
2049 |
-
|
2050 |
-
|
2051 |
-
class ScriptWriter:
|
2052 |
-
"""
|
2053 |
-
Encapsulates behavior around writing entry point scripts for console and
|
2054 |
-
gui apps.
|
2055 |
-
"""
|
2056 |
-
|
2057 |
-
template = textwrap.dedent(r"""
|
2058 |
-
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
|
2059 |
-
import re
|
2060 |
-
import sys
|
2061 |
-
|
2062 |
-
# for compatibility with easy_install; see #2198
|
2063 |
-
__requires__ = %(spec)r
|
2064 |
-
|
2065 |
-
try:
|
2066 |
-
from importlib.metadata import distribution
|
2067 |
-
except ImportError:
|
2068 |
-
try:
|
2069 |
-
from importlib_metadata import distribution
|
2070 |
-
except ImportError:
|
2071 |
-
from pkg_resources import load_entry_point
|
2072 |
-
|
2073 |
-
|
2074 |
-
def importlib_load_entry_point(spec, group, name):
|
2075 |
-
dist_name, _, _ = spec.partition('==')
|
2076 |
-
matches = (
|
2077 |
-
entry_point
|
2078 |
-
for entry_point in distribution(dist_name).entry_points
|
2079 |
-
if entry_point.group == group and entry_point.name == name
|
2080 |
-
)
|
2081 |
-
return next(matches).load()
|
2082 |
-
|
2083 |
-
|
2084 |
-
globals().setdefault('load_entry_point', importlib_load_entry_point)
|
2085 |
-
|
2086 |
-
|
2087 |
-
if __name__ == '__main__':
|
2088 |
-
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
|
2089 |
-
sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)())
|
2090 |
-
""").lstrip()
|
2091 |
-
|
2092 |
-
command_spec_class = CommandSpec
|
2093 |
-
|
2094 |
-
@classmethod
|
2095 |
-
def get_script_args(cls, dist, executable=None, wininst=False):
|
2096 |
-
# for backward compatibility
|
2097 |
-
warnings.warn("Use get_args", EasyInstallDeprecationWarning)
|
2098 |
-
writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
|
2099 |
-
header = cls.get_script_header("", executable, wininst)
|
2100 |
-
return writer.get_args(dist, header)
|
2101 |
-
|
2102 |
-
@classmethod
|
2103 |
-
def get_script_header(cls, script_text, executable=None, wininst=False):
|
2104 |
-
# for backward compatibility
|
2105 |
-
warnings.warn(
|
2106 |
-
"Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
|
2107 |
-
if wininst:
|
2108 |
-
executable = "python.exe"
|
2109 |
-
return cls.get_header(script_text, executable)
|
2110 |
-
|
2111 |
-
@classmethod
|
2112 |
-
def get_args(cls, dist, header=None):
|
2113 |
-
"""
|
2114 |
-
Yield write_script() argument tuples for a distribution's
|
2115 |
-
console_scripts and gui_scripts entry points.
|
2116 |
-
"""
|
2117 |
-
if header is None:
|
2118 |
-
header = cls.get_header()
|
2119 |
-
spec = str(dist.as_requirement())
|
2120 |
-
for type_ in 'console', 'gui':
|
2121 |
-
group = type_ + '_scripts'
|
2122 |
-
for name, ep in dist.get_entry_map(group).items():
|
2123 |
-
cls._ensure_safe_name(name)
|
2124 |
-
script_text = cls.template % locals()
|
2125 |
-
args = cls._get_script_args(type_, name, header, script_text)
|
2126 |
-
for res in args:
|
2127 |
-
yield res
|
2128 |
-
|
2129 |
-
@staticmethod
|
2130 |
-
def _ensure_safe_name(name):
|
2131 |
-
"""
|
2132 |
-
Prevent paths in *_scripts entry point names.
|
2133 |
-
"""
|
2134 |
-
has_path_sep = re.search(r'[\\/]', name)
|
2135 |
-
if has_path_sep:
|
2136 |
-
raise ValueError("Path separators not allowed in script names")
|
2137 |
-
|
2138 |
-
@classmethod
|
2139 |
-
def get_writer(cls, force_windows):
|
2140 |
-
# for backward compatibility
|
2141 |
-
warnings.warn("Use best", EasyInstallDeprecationWarning)
|
2142 |
-
return WindowsScriptWriter.best() if force_windows else cls.best()
|
2143 |
-
|
2144 |
-
@classmethod
|
2145 |
-
def best(cls):
|
2146 |
-
"""
|
2147 |
-
Select the best ScriptWriter for this environment.
|
2148 |
-
"""
|
2149 |
-
if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
|
2150 |
-
return WindowsScriptWriter.best()
|
2151 |
-
else:
|
2152 |
-
return cls
|
2153 |
-
|
2154 |
-
@classmethod
|
2155 |
-
def _get_script_args(cls, type_, name, header, script_text):
|
2156 |
-
# Simply write the stub with no extension.
|
2157 |
-
yield (name, header + script_text)
|
2158 |
-
|
2159 |
-
@classmethod
|
2160 |
-
def get_header(cls, script_text="", executable=None):
|
2161 |
-
"""Create a #! line, getting options (if any) from script_text"""
|
2162 |
-
cmd = cls.command_spec_class.best().from_param(executable)
|
2163 |
-
cmd.install_options(script_text)
|
2164 |
-
return cmd.as_header()
|
2165 |
-
|
2166 |
-
|
2167 |
-
class WindowsScriptWriter(ScriptWriter):
|
2168 |
-
command_spec_class = WindowsCommandSpec
|
2169 |
-
|
2170 |
-
@classmethod
|
2171 |
-
def get_writer(cls):
|
2172 |
-
# for backward compatibility
|
2173 |
-
warnings.warn("Use best", EasyInstallDeprecationWarning)
|
2174 |
-
return cls.best()
|
2175 |
-
|
2176 |
-
@classmethod
|
2177 |
-
def best(cls):
|
2178 |
-
"""
|
2179 |
-
Select the best ScriptWriter suitable for Windows
|
2180 |
-
"""
|
2181 |
-
writer_lookup = dict(
|
2182 |
-
executable=WindowsExecutableLauncherWriter,
|
2183 |
-
natural=cls,
|
2184 |
-
)
|
2185 |
-
# for compatibility, use the executable launcher by default
|
2186 |
-
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
|
2187 |
-
return writer_lookup[launcher]
|
2188 |
-
|
2189 |
-
@classmethod
|
2190 |
-
def _get_script_args(cls, type_, name, header, script_text):
|
2191 |
-
"For Windows, add a .py extension"
|
2192 |
-
ext = dict(console='.pya', gui='.pyw')[type_]
|
2193 |
-
if ext not in os.environ['PATHEXT'].lower().split(';'):
|
2194 |
-
msg = (
|
2195 |
-
"{ext} not listed in PATHEXT; scripts will not be "
|
2196 |
-
"recognized as executables."
|
2197 |
-
).format(**locals())
|
2198 |
-
warnings.warn(msg, UserWarning)
|
2199 |
-
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
|
2200 |
-
old.remove(ext)
|
2201 |
-
header = cls._adjust_header(type_, header)
|
2202 |
-
blockers = [name + x for x in old]
|
2203 |
-
yield name + ext, header + script_text, 't', blockers
|
2204 |
-
|
2205 |
-
@classmethod
|
2206 |
-
def _adjust_header(cls, type_, orig_header):
|
2207 |
-
"""
|
2208 |
-
Make sure 'pythonw' is used for gui and 'python' is used for
|
2209 |
-
console (regardless of what sys.executable is).
|
2210 |
-
"""
|
2211 |
-
pattern = 'pythonw.exe'
|
2212 |
-
repl = 'python.exe'
|
2213 |
-
if type_ == 'gui':
|
2214 |
-
pattern, repl = repl, pattern
|
2215 |
-
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
|
2216 |
-
new_header = pattern_ob.sub(string=orig_header, repl=repl)
|
2217 |
-
return new_header if cls._use_header(new_header) else orig_header
|
2218 |
-
|
2219 |
-
@staticmethod
|
2220 |
-
def _use_header(new_header):
|
2221 |
-
"""
|
2222 |
-
Should _adjust_header use the replaced header?
|
2223 |
-
|
2224 |
-
On non-windows systems, always use. On
|
2225 |
-
Windows systems, only use the replaced header if it resolves
|
2226 |
-
to an executable on the system.
|
2227 |
-
"""
|
2228 |
-
clean_header = new_header[2:-1].strip('"')
|
2229 |
-
return sys.platform != 'win32' or find_executable(clean_header)
|
2230 |
-
|
2231 |
-
|
2232 |
-
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
|
2233 |
-
@classmethod
|
2234 |
-
def _get_script_args(cls, type_, name, header, script_text):
|
2235 |
-
"""
|
2236 |
-
For Windows, add a .py extension and an .exe launcher
|
2237 |
-
"""
|
2238 |
-
if type_ == 'gui':
|
2239 |
-
launcher_type = 'gui'
|
2240 |
-
ext = '-script.pyw'
|
2241 |
-
old = ['.pyw']
|
2242 |
-
else:
|
2243 |
-
launcher_type = 'cli'
|
2244 |
-
ext = '-script.py'
|
2245 |
-
old = ['.py', '.pyc', '.pyo']
|
2246 |
-
hdr = cls._adjust_header(type_, header)
|
2247 |
-
blockers = [name + x for x in old]
|
2248 |
-
yield (name + ext, hdr + script_text, 't', blockers)
|
2249 |
-
yield (
|
2250 |
-
name + '.exe', get_win_launcher(launcher_type),
|
2251 |
-
'b' # write in binary mode
|
2252 |
-
)
|
2253 |
-
if not is_64bit():
|
2254 |
-
# install a manifest for the launcher to prevent Windows
|
2255 |
-
# from detecting it as an installer (which it will for
|
2256 |
-
# launchers like easy_install.exe). Consider only
|
2257 |
-
# adding a manifest for launchers detected as installers.
|
2258 |
-
# See Distribute #143 for details.
|
2259 |
-
m_name = name + '.exe.manifest'
|
2260 |
-
yield (m_name, load_launcher_manifest(name), 't')
|
2261 |
-
|
2262 |
-
|
2263 |
-
# for backward-compatibility
|
2264 |
-
get_script_args = ScriptWriter.get_script_args
|
2265 |
-
get_script_header = ScriptWriter.get_script_header
|
2266 |
-
|
2267 |
-
|
2268 |
-
def get_win_launcher(type):
|
2269 |
-
"""
|
2270 |
-
Load the Windows launcher (executable) suitable for launching a script.
|
2271 |
-
|
2272 |
-
`type` should be either 'cli' or 'gui'
|
2273 |
-
|
2274 |
-
Returns the executable as a byte string.
|
2275 |
-
"""
|
2276 |
-
launcher_fn = '%s.exe' % type
|
2277 |
-
if is_64bit():
|
2278 |
-
if get_platform() == "win-arm64":
|
2279 |
-
launcher_fn = launcher_fn.replace(".", "-arm64.")
|
2280 |
-
else:
|
2281 |
-
launcher_fn = launcher_fn.replace(".", "-64.")
|
2282 |
-
else:
|
2283 |
-
launcher_fn = launcher_fn.replace(".", "-32.")
|
2284 |
-
return resource_string('setuptools', launcher_fn)
|
2285 |
-
|
2286 |
-
|
2287 |
-
def load_launcher_manifest(name):
|
2288 |
-
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
|
2289 |
-
return manifest.decode('utf-8') % vars()
|
2290 |
-
|
2291 |
-
|
2292 |
-
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
|
2293 |
-
return shutil.rmtree(path, ignore_errors, onerror)
|
2294 |
-
|
2295 |
-
|
2296 |
-
def current_umask():
|
2297 |
-
tmp = os.umask(0o022)
|
2298 |
-
os.umask(tmp)
|
2299 |
-
return tmp
|
2300 |
-
|
2301 |
-
|
2302 |
-
def only_strs(values):
|
2303 |
-
"""
|
2304 |
-
Exclude non-str values. Ref #3063.
|
2305 |
-
"""
|
2306 |
-
return filter(lambda val: isinstance(val, str), values)
|
2307 |
-
|
2308 |
-
|
2309 |
-
class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
|
2310 |
-
"""
|
2311 |
-
Warning for EasyInstall deprecations, bypassing suppression.
|
2312 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
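The CommandSpec/ScriptWriter code deleted above is what builds the `#!` line stamped onto generated entry-point scripts. A minimal standalone sketch of that rendering step (the function names only mirror the removed `_strip_quotes`/`_render` helpers; this is an illustrative re-implementation, not the setuptools API):

```python
import shlex
import subprocess

def strip_quotes(item):
    # drop one layer of matching quotes, as CommandSpec._strip_quotes did
    for q in ('"', "'"):
        if item.startswith(q) and item.endswith(q):
            return item[1:-1]
    return item

def render_header(items):
    # join the arguments into a single '#!' line, as CommandSpec._render did
    cmdline = subprocess.list2cmdline(strip_quotes(i.strip()) for i in items)
    return '#!' + cmdline + '\n'

# roughly what CommandSpec.from_string(...).as_header() produced
spec = shlex.split('/usr/bin/python3 -E')
print(render_header(spec))  # -> #!/usr/bin/python3 -E
```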
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py
DELETED
@@ -1,9 +0,0 @@
-from .mask_rcnn_R_50_FPN_100ep_LSJ import (
-    dataloader,
-    lr_multiplier,
-    model,
-    optimizer,
-    train,
-)
-
-model.backbone.bottom_up.stages.depth = 101
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/dlafpn.py
DELETED
@@ -1,493 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# this file is from https://github.com/ucbdrive/dla/blob/master/dla.py.
-
-import math
-from os.path import join
-import numpy as np
-
-import torch
-from torch import nn
-import torch.utils.model_zoo as model_zoo
-import torch.nn.functional as F
-import fvcore.nn.weight_init as weight_init
-
-from detectron2.modeling.backbone import FPN
-from detectron2.layers import ShapeSpec, ModulatedDeformConv, Conv2d
-from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
-from detectron2.layers.batch_norm import get_norm
-from detectron2.modeling.backbone import Backbone
-
-WEB_ROOT = 'http://dl.yf.io/dla/models'
-
-
-def get_model_url(data, name, hash):
-    return join(
-        'http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
-
-
-def conv3x3(in_planes, out_planes, stride=1):
-    "3x3 convolution with padding"
-    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
-                     padding=1, bias=False)
-
-
-class BasicBlock(nn.Module):
-    def __init__(self, cfg, inplanes, planes, stride=1, dilation=1):
-        super(BasicBlock, self).__init__()
-        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
-                               stride=stride, padding=dilation,
-                               bias=False, dilation=dilation)
-        self.bn1 = get_norm(cfg.MODEL.DLA.NORM, planes)
-        self.relu = nn.ReLU(inplace=True)
-        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
-                               stride=1, padding=dilation,
-                               bias=False, dilation=dilation)
-        self.bn2 = get_norm(cfg.MODEL.DLA.NORM, planes)
-        self.stride = stride
-
-    def forward(self, x, residual=None):
-        if residual is None:
-            residual = x
-
-        out = self.conv1(x)
-        out = self.bn1(out)
-        out = self.relu(out)
-
-        out = self.conv2(out)
-        out = self.bn2(out)
-
-        out += residual
-        out = self.relu(out)
-
-        return out
-
-
-class Bottleneck(nn.Module):
-    expansion = 2
-
-    def __init__(self, cfg, inplanes, planes, stride=1, dilation=1):
-        super(Bottleneck, self).__init__()
-        expansion = Bottleneck.expansion
-        bottle_planes = planes // expansion
-        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
-                               kernel_size=1, bias=False)
-        self.bn1 = get_norm(cfg.MODEL.DLA.NORM, bottle_planes)
-        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
-                               stride=stride, padding=dilation,
-                               bias=False, dilation=dilation)
-        self.bn2 = get_norm(cfg.MODEL.DLA.NORM, bottle_planes)
-        self.conv3 = nn.Conv2d(bottle_planes, planes,
-                               kernel_size=1, bias=False)
-        self.bn3 = get_norm(cfg.MODEL.DLA.NORM, planes)
-        self.relu = nn.ReLU(inplace=True)
-        self.stride = stride
-
-    def forward(self, x, residual=None):
-        if residual is None:
-            residual = x
-
-        out = self.conv1(x)
-        out = self.bn1(out)
-        out = self.relu(out)
-
-        out = self.conv2(out)
-        out = self.bn2(out)
-        out = self.relu(out)
-
-        out = self.conv3(out)
-        out = self.bn3(out)
-
-        out += residual
-        out = self.relu(out)
-
-        return out
-
-
-class Root(nn.Module):
-    def __init__(self, cfg, in_channels, out_channels, kernel_size, residual):
-        super(Root, self).__init__()
-        self.conv = nn.Conv2d(
-            in_channels, out_channels, kernel_size,
-            stride=1, bias=False, padding=(kernel_size - 1) // 2)
-        self.bn = get_norm(cfg.MODEL.DLA.NORM, out_channels)
-        self.relu = nn.ReLU(inplace=True)
-        self.residual = residual
-
-    def forward(self, *x):
-        children = x
-        x = self.conv(torch.cat(x, 1))
-        x = self.bn(x)
-        if self.residual:
-            x += children[0]
-        x = self.relu(x)
-
-        return x
-
-
-class Tree(nn.Module):
-    def __init__(self, cfg, levels, block, in_channels, out_channels, stride=1,
-                 level_root=False, root_dim=0, root_kernel_size=1,
-                 dilation=1, root_residual=False):
-        super(Tree, self).__init__()
-        if root_dim == 0:
-            root_dim = 2 * out_channels
-        if level_root:
-            root_dim += in_channels
-        if levels == 1:
-            self.tree1 = block(cfg, in_channels, out_channels, stride,
-                               dilation=dilation)
-            self.tree2 = block(cfg, out_channels, out_channels, 1,
-                               dilation=dilation)
-        else:
-            self.tree1 = Tree(cfg, levels - 1, block, in_channels, out_channels,
-                              stride, root_dim=0,
-                              root_kernel_size=root_kernel_size,
-                              dilation=dilation, root_residual=root_residual)
-            self.tree2 = Tree(cfg, levels - 1, block, out_channels, out_channels,
-                              root_dim=root_dim + out_channels,
-                              root_kernel_size=root_kernel_size,
-                              dilation=dilation, root_residual=root_residual)
-        if levels == 1:
-            self.root = Root(cfg, root_dim, out_channels, root_kernel_size,
-                             root_residual)
-        self.level_root = level_root
-        self.root_dim = root_dim
-        self.downsample = None
-        self.project = None
-        self.levels = levels
-        if stride > 1:
-            self.downsample = nn.MaxPool2d(stride, stride=stride)
-        if in_channels != out_channels:
-            self.project = nn.Sequential(
-                nn.Conv2d(in_channels, out_channels,
-                          kernel_size=1, stride=1, bias=False),
-                get_norm(cfg.MODEL.DLA.NORM, out_channels)
-            )
-
-    def forward(self, x, residual=None, children=None):
-        if self.training and residual is not None:
-            x = x + residual.sum() * 0.0
-        children = [] if children is None else children
-        bottom = self.downsample(x) if self.downsample else x
-        residual = self.project(bottom) if self.project else bottom
-        if self.level_root:
-            children.append(bottom)
-        x1 = self.tree1(x, residual)
-        if self.levels == 1:
-            x2 = self.tree2(x1)
-            x = self.root(x2, x1, *children)
-        else:
-            children.append(x1)
-            x = self.tree2(x1, children=children)
-        return x
-
-
-class DLA(Backbone):
-    def __init__(self, cfg, levels, channels, block=BasicBlock, residual_root=False):
-        super(DLA, self).__init__()
-        self.cfg = cfg
-        self.channels = channels
-
-        self._out_features = ["dla{}".format(i) for i in range(6)]
-        self._out_feature_channels = {k: channels[i] for i, k in enumerate(self._out_features)}
-        self._out_feature_strides = {k: 2 ** i for i, k in enumerate(self._out_features)}
-
-        self.base_layer = nn.Sequential(
-            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
-                      padding=3, bias=False),
-            get_norm(cfg.MODEL.DLA.NORM, channels[0]),
-            nn.ReLU(inplace=True))
-        self.level0 = self._make_conv_level(
-            channels[0], channels[0], levels[0])
-        self.level1 = self._make_conv_level(
-            channels[0], channels[1], levels[1], stride=2)
-        self.level2 = Tree(cfg, levels[2], block, channels[1], channels[2], 2,
-                           level_root=False,
-                           root_residual=residual_root)
-        self.level3 = Tree(cfg, levels[3], block, channels[2], channels[3], 2,
-                           level_root=True, root_residual=residual_root)
-        self.level4 = Tree(cfg, levels[4], block, channels[3], channels[4], 2,
-                           level_root=True, root_residual=residual_root)
-        self.level5 = Tree(cfg, levels[5], block, channels[4], channels[5], 2,
-                           level_root=True, root_residual=residual_root)
-
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d):
-                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
-                m.weight.data.normal_(0, math.sqrt(2. / n))
-
-        self.load_pretrained_model(
-            data='imagenet', name='dla34', hash='ba72cf86')
-
-    def load_pretrained_model(self, data, name, hash):
-        model_url = get_model_url(data, name, hash)
-        model_weights = model_zoo.load_url(model_url)
-        del model_weights['fc.weight']
-        del model_weights['fc.bias']
-        print('Loading pretrained DLA!')
-        self.load_state_dict(model_weights, strict=True)
-
-    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
-        modules = []
-        for i in range(convs):
-            modules.extend([
-                nn.Conv2d(inplanes, planes, kernel_size=3,
-                          stride=stride if i == 0 else 1,
-                          padding=dilation, bias=False, dilation=dilation),
-                get_norm(self.cfg.MODEL.DLA.NORM, planes),
-                nn.ReLU(inplace=True)])
-            inplanes = planes
-        return nn.Sequential(*modules)
-
-    def forward(self, x):
-        y = {}
-        x = self.base_layer(x)
-        for i in range(6):
-            name = 'level{}'.format(i)
-            x = getattr(self, name)(x)
-            y['dla{}'.format(i)] = x
-        return y
-
-
-def fill_up_weights(up):
-    w = up.weight.data
-    f = math.ceil(w.size(2) / 2)
-    c = (2 * f - 1 - f % 2) / (2. * f)
-    for i in range(w.size(2)):
-        for j in range(w.size(3)):
-            w[0, 0, i, j] = \
-                (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
-    for c in range(1, w.size(0)):
-        w[c, 0, :, :] = w[0, 0, :, :]
-
-
-class Conv(nn.Module):
-    def __init__(self, chi, cho, norm):
-        super(Conv, self).__init__()
-        self.conv = nn.Sequential(
-            nn.Conv2d(chi, cho, kernel_size=1, stride=1, bias=False),
-            get_norm(norm, cho),
-            nn.ReLU(inplace=True))
-
-    def forward(self, x):
-        return self.conv(x)
-
-
-class DeformConv(nn.Module):
-    def __init__(self, chi, cho, norm):
-        super(DeformConv, self).__init__()
-        self.actf = nn.Sequential(
-            get_norm(norm, cho),
-            nn.ReLU(inplace=True)
-        )
-        self.offset = Conv2d(
-            chi, 27, kernel_size=3, stride=1,
-            padding=1, dilation=1)
-        self.conv = ModulatedDeformConv(
-            chi, cho, kernel_size=3, stride=1, padding=1,
-            dilation=1, deformable_groups=1)
-        nn.init.constant_(self.offset.weight, 0)
-        nn.init.constant_(self.offset.bias, 0)
-
-    def forward(self, x):
-        offset_mask = self.offset(x)
-        offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
-        offset = torch.cat((offset_x, offset_y), dim=1)
-        mask = mask.sigmoid()
-        x = self.conv(x, offset, mask)
-        x = self.actf(x)
-        return x
-
-
-class IDAUp(nn.Module):
-    def __init__(self, o, channels, up_f, norm='FrozenBN', node_type=Conv):
-        super(IDAUp, self).__init__()
-        for i in range(1, len(channels)):
-            c = channels[i]
-            f = int(up_f[i])
-            proj = node_type(c, o, norm)
-            node = node_type(o, o, norm)
-
-            up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
-                                    padding=f // 2, output_padding=0,
-                                    groups=o, bias=False)
-            fill_up_weights(up)
-
-            setattr(self, 'proj_' + str(i), proj)
-            setattr(self, 'up_' + str(i), up)
-            setattr(self, 'node_' + str(i), node)
-
-    def forward(self, layers, startp, endp):
-        for i in range(startp + 1, endp):
-            upsample = getattr(self, 'up_' + str(i - startp))
-            project = getattr(self, 'proj_' + str(i - startp))
-            layers[i] = upsample(project(layers[i]))
-            node = getattr(self, 'node_' + str(i - startp))
-            layers[i] = node(layers[i] + layers[i - 1])
-
-
-DLAUP_NODE_MAP = {
-    'conv': Conv,
-    'dcn': DeformConv,
-}
-
-class DLAUP(Backbone):
-    def __init__(self, bottom_up, in_features, norm, dlaup_node='conv'):
-        super(DLAUP, self).__init__()
-        assert isinstance(bottom_up, Backbone)
-        self.bottom_up = bottom_up
-        input_shapes = bottom_up.output_shape()
-        in_strides = [input_shapes[f].stride for f in in_features]
-        in_channels = [input_shapes[f].channels for f in in_features]
-        in_levels = [int(math.log2(input_shapes[f].stride)) for f in in_features]
-        self.in_features = in_features
-        out_features = ['dlaup{}'.format(l) for l in in_levels]
-        self._out_features = out_features
-        self._out_feature_channels = {
-            'dlaup{}'.format(l): in_channels[i] for i, l in enumerate(in_levels)}
-        self._out_feature_strides = {
-            'dlaup{}'.format(l): 2 ** l for l in in_levels}
-
-        print('self._out_features', self._out_features)
-        print('self._out_feature_channels', self._out_feature_channels)
-        print('self._out_feature_strides', self._out_feature_strides)
-        self._size_divisibility = 32
-
-        node_type = DLAUP_NODE_MAP[dlaup_node]
-
-        self.startp = int(math.log2(in_strides[0]))
-        self.channels = in_channels
-        channels = list(in_channels)
-        scales = np.array([2 ** i for i in range(len(out_features))], dtype=int)
-        for i in range(len(channels) - 1):
-            j = -i - 2
-            setattr(self, 'ida_{}'.format(i),
-                    IDAUp(channels[j], in_channels[j:],
-                          scales[j:] // scales[j],
-                          norm=norm,
-                          node_type=node_type))
-            scales[j + 1:] = scales[j]
-            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
-
-    @property
-    def size_divisibility(self):
-        return self._size_divisibility
-
-    def forward(self, x):
-        bottom_up_features = self.bottom_up(x)
-        layers = [bottom_up_features[f] for f in self.in_features]
-        out = [layers[-1]]  # start with 32
-        for i in range(len(layers) - 1):
-            ida = getattr(self, 'ida_{}'.format(i))
-            ida(layers, len(layers) - i - 2, len(layers))
-            out.insert(0, layers[-1])
-        ret = {}
-        for k, v in zip(self._out_features, out):
-            ret[k] = v
-        # import pdb; pdb.set_trace()
-        return ret
-
-
-def dla34(cfg, pretrained=None):  # DLA-34
-    model = DLA(cfg, [1, 1, 1, 2, 2, 1],
-                [16, 32, 64, 128, 256, 512],
-                block=BasicBlock)
-    return model
-
-
-class LastLevelP6P7(nn.Module):
-    """
-    This module is used in RetinaNet to generate extra layers, P6 and P7 from
-    C5 feature.
-    """
-
-    def __init__(self, in_channels, out_channels):
-        super().__init__()
-        self.num_levels = 2
-        self.in_feature = "dla5"
-        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
-        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
-        for module in [self.p6, self.p7]:
-            weight_init.c2_xavier_fill(module)
-
-    def forward(self, c5):
-        p6 = self.p6(c5)
-        p7 = self.p7(F.relu(p6))
-        return [p6, p7]
-
-
-@BACKBONE_REGISTRY.register()
-def build_dla_fpn3_backbone(cfg, input_shape: ShapeSpec):
-    """
-    Args:
-        cfg: a detectron2 CfgNode
-    Returns:
-        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
-    """
-
-    depth_to_creator = {"dla34": dla34}
-    bottom_up = depth_to_creator['dla{}'.format(cfg.MODEL.DLA.NUM_LAYERS)](cfg)
-    in_features = cfg.MODEL.FPN.IN_FEATURES
-    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
-
-    backbone = FPN(
-        bottom_up=bottom_up,
-        in_features=in_features,
-        out_channels=out_channels,
-        norm=cfg.MODEL.FPN.NORM,
-        top_block=None,
-        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
-    )
-
-    return backbone
-
-@BACKBONE_REGISTRY.register()
-def build_dla_fpn5_backbone(cfg, input_shape: ShapeSpec):
-    """
-    Args:
-        cfg: a detectron2 CfgNode
-    Returns:
-        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
-    """
-
-    depth_to_creator = {"dla34": dla34}
-    bottom_up = depth_to_creator['dla{}'.format(cfg.MODEL.DLA.NUM_LAYERS)](cfg)
-    in_features = cfg.MODEL.FPN.IN_FEATURES
-    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
-    in_channels_top = bottom_up.output_shape()['dla5'].channels
-
-    backbone = FPN(
-        bottom_up=bottom_up,
-        in_features=in_features,
-        out_channels=out_channels,
-        norm=cfg.MODEL.FPN.NORM,
-        top_block=LastLevelP6P7(in_channels_top, out_channels),
-        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
-    )
-
-    return backbone
-
-
-@BACKBONE_REGISTRY.register()
-def build_dlaup_backbone(cfg, input_shape: ShapeSpec):
-    """
-    Args:
-        cfg: a detectron2 CfgNode
-    Returns:
-        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
-    """
-
-    depth_to_creator = {"dla34": dla34}
-    bottom_up = depth_to_creator['dla{}'.format(cfg.MODEL.DLA.NUM_LAYERS)](cfg)
-
-    backbone = DLAUP(
-        bottom_up=bottom_up,
-        in_features=cfg.MODEL.DLA.DLAUP_IN_FEATURES,
-        norm=cfg.MODEL.DLA.NORM,
-        dlaup_node=cfg.MODEL.DLA.DLAUP_NODE,
-    )
-
-    return backbone
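One detail worth pulling out of the deleted dlafpn.py: `fill_up_weights` initializes each `ConvTranspose2d` in `IDAUp` with a fixed bilinear-interpolation kernel, so the upsampling starts as plain bilinear resizing. A small NumPy sketch of the same weight formula (illustrative only, written against plain NumPy rather than PyTorch tensors):

```python
import math
import numpy as np

def bilinear_kernel(size):
    # same formula as fill_up_weights, for a single (size x size) channel
    f = math.ceil(size / 2)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    w = np.zeros((size, size))
    for i in range(size):
        for j in range(size):
            w[i, j] = (1 - abs(i / f - c)) * (1 - abs(j / f - c))
    return w

# a 4x4 kernel for 2x upsampling: weights peak at the center and taper linearly;
# each row/column is the outer product of [0.25, 0.75, 0.75, 0.25] with itself
print(bilinear_kernel(4))
```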
spaces/BaddaAshok0265/AshokGenAI/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: AshokGenAI
-emoji: 🔥
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Bart92/RVC_HF/lib/infer_pack/modules/F0Predictor/F0Predictor.py
DELETED
@@ -1,16 +0,0 @@
-class F0Predictor(object):
-    def compute_f0(self, wav, p_len):
-        """
-        input: wav:[signal_length]
-               p_len:int
-        output: f0:[signal_length//hop_length]
-        """
-        pass
-
-    def compute_f0_uv(self, wav, p_len):
-        """
-        input: wav:[signal_length]
-               p_len:int
-        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
-        """
-        pass
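The deleted F0Predictor.py is only an abstract interface; concrete pitch predictors fill in `compute_f0`/`compute_f0_uv`. A toy sketch of what a subclass looks like (the `ConstantF0Predictor` here is hypothetical, purely to show the expected shapes; real predictors would analyse the waveform):

```python
import numpy as np

class ConstantF0Predictor:  # would subclass the deleted F0Predictor base
    """Hypothetical predictor: emits a flat 100 Hz contour of length p_len."""

    def compute_f0(self, wav, p_len):
        # a real predictor derives f0 from `wav`; we return a constant track
        return np.full(p_len, 100.0)

    def compute_f0_uv(self, wav, p_len):
        f0 = self.compute_f0(wav, p_len)
        uv = (f0 > 0).astype(np.float32)  # voiced/unvoiced flags
        return f0, uv

f0, uv = ConstantF0Predictor().compute_f0_uv(np.zeros(16000), p_len=50)
assert f0.shape == (50,) and uv.shape == (50,)
```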
spaces/Bingyunhu/hoping/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Hoping
-emoji: 🏆
-colorFrom: gray
-colorTo: pink
-sdk: docker
-pinned: false
-license: mit
-app_port: 8080
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/LIVE/atomic.h
DELETED
@@ -1,139 +0,0 @@
-#pragma once
-
-#include "diffvg.h"
-#include "vector.h"
-#include "matrix.h"
-
-// https://stackoverflow.com/questions/39274472/error-function-atomicadddouble-double-has-already-been-defined
-#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
-#else
-static inline DEVICE double atomicAdd(double *address, double val) {
-    unsigned long long int* address_as_ull = (unsigned long long int*)address;
-    unsigned long long int old = *address_as_ull, assumed;
-    if (val == 0.0)
-        return __longlong_as_double(old);
-    do {
-        assumed = old;
-        old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed)));
-    } while (assumed != old);
-    return __longlong_as_double(old);
-}
-#endif
-
-#ifndef WIN32
-template <typename T0, typename T1>
-DEVICE
-inline T0 atomic_add_(T0 &target, T1 source) {
-#ifdef __CUDA_ARCH__
-    return atomicAdd(&target, (T0)source);
-#else
-    T0 old_val;
-    T0 new_val;
-    do {
-        old_val = target;
-        new_val = old_val + source;
-    } while (!__atomic_compare_exchange(&target, &old_val, &new_val, true,
-        std::memory_order::memory_order_seq_cst,
-        std::memory_order::memory_order_seq_cst));
-    return old_val;
-#endif
-}
-
-DEVICE
-inline
-float atomic_add(float &target, float source) {
-    return atomic_add_(target, source);
-}
-DEVICE
-inline
-double atomic_add(double &target, double source) {
-    return atomic_add_(target, source);
-}
-#else
-float win_atomic_add(float &target, float source);
-double win_atomic_add(double &target, double source);
-DEVICE
-static float atomic_add(float &target, float source) {
-#ifdef __CUDA_ARCH__
-    return atomicAdd(&target, source);
-#else
-    return win_atomic_add(target, source);
-#endif
-}
-DEVICE
-static double atomic_add(double &target, double source) {
-#ifdef __CUDA_ARCH__
-    return atomicAdd(&target, (double)source);
-#else
-    return win_atomic_add(target, source);
-#endif
-}
-#endif
-
-template <typename T0, typename T1>
-DEVICE
-inline T0 atomic_add(T0 *target, T1 source) {
-    return atomic_add(*target, (T0)source);
-}
-
-template <typename T0, typename T1>
-DEVICE
-inline TVector2<T0> atomic_add(TVector2<T0> &target, const TVector2<T1> &source) {
-    atomic_add(target[0], source[0]);
-    atomic_add(target[1], source[1]);
-    return target;
-}
-
-template <typename T0, typename T1>
-DEVICE
-inline void atomic_add(T0 *target, const TVector2<T1> &source) {
-    atomic_add(target[0], (T0)source[0]);
-    atomic_add(target[1], (T0)source[1]);
-}
-
-template <typename T0, typename T1>
-DEVICE
-inline TVector3<T0> atomic_add(TVector3<T0> &target, const TVector3<T1> &source) {
-    atomic_add(target[0], source[0]);
-    atomic_add(target[1], source[1]);
-    atomic_add(target[2], source[2]);
-    return target;
-}
-
-template <typename T0, typename T1>
-DEVICE
-inline void atomic_add(T0 *target, const TVector3<T1> &source) {
-    atomic_add(target[0], (T0)source[0]);
-    atomic_add(target[1], (T0)source[1]);
-    atomic_add(target[2], (T0)source[2]);
-}
-
-template <typename T0, typename T1>
-DEVICE
-inline TVector4<T0> atomic_add(TVector4<T0> &target, const TVector4<T1> &source) {
-    atomic_add(target[0], source[0]);
-    atomic_add(target[1], source[1]);
-    atomic_add(target[2], source[2]);
-    atomic_add(target[3], source[3]);
-    return target;
-}
-
-template <typename T0, typename T1>
-DEVICE
-inline void atomic_add(T0 *target, const TVector4<T1> &source) {
-    atomic_add(target[0], (T0)source[0]);
-    atomic_add(target[1], (T0)source[1]);
-    atomic_add(target[2], (T0)source[2]);
-    atomic_add(target[3], (T0)source[3]);
-}
-
-template <typename T0, typename T1>
-DEVICE
-inline void atomic_add(T0 *target, const TMatrix3x3<T1> &source) {
-    for (int i = 0; i < 3; i++) {
-        for (int j = 0; j < 3; j++) {
-            atomic_add(target[3 * i + j], (T0)source(i, j));
-        }
-    }
-}
-
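The core of the deleted atomic.h is the compare-and-swap retry loop in `atomic_add_`, which makes concurrent read-modify-write accumulation safe and returns the prior value. Python has no CAS primitive, so the sketch below uses a lock to get the same guarantee; it is an analogue of the pattern, not a port of the CUDA/C++ code:

```python
import threading

class AtomicFloat:
    """Lock-based analogue of atomic_add_: serialized read-modify-write."""

    def __init__(self, value=0.0):
        self._value = value
        self._lock = threading.Lock()  # stands in for __atomic_compare_exchange

    def add(self, source):
        with self._lock:
            old_val = self._value            # old_val = target
            self._value = old_val + source   # new_val = old_val + source
        return old_val                       # like atomicAdd, return prior value

acc = AtomicFloat()
workers = [threading.Thread(target=lambda: [acc.add(1.0) for _ in range(1000)])
           for _ in range(8)]
for t in workers:
    t.start()
for t in workers:
    t.join()
print(acc._value)  # 8000.0 regardless of thread interleaving
```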
spaces/CVPR/LIVE/diffvg.h
DELETED
@@ -1,156 +0,0 @@
-#pragma once
-
-#ifdef __NVCC__
-#define DEVICE __device__ __host__
-#else
-#define DEVICE
-#endif
-
-#ifndef __NVCC__
-#include <cmath>
-namespace {
-inline float fmodf(float a, float b) {
-    return std::fmod(a, b);
-}
-inline double fmod(double a, double b) {
-    return std::fmod(a, b);
-}
-}
-using std::isfinite;
-#endif
-
-#ifndef M_PI
-#define M_PI 3.14159265358979323846
-#endif
-
-#include <cstdint>
-#include <atomic>
-
-// We use Real for most of the internal computation.
-// However, for PyTorch interfaces, Optix Prime and Embree queries
-// we use float
-using Real = float;
-
-template <typename T>
-DEVICE
-inline T square(const T &x) {
-    return x * x;
-}
-
-template <typename T>
-DEVICE
-inline T cubic(const T &x) {
-    return x * x * x;
-}
-
-template <typename T>
-DEVICE
-inline T clamp(const T &v, const T &lo, const T &hi) {
-    if (v < lo) return lo;
-    else if (v > hi) return hi;
-    else return v;
-}
-
-DEVICE
-inline int modulo(int a, int b) {
-    auto r = a % b;
-    return (r < 0) ? r+b : r;
-}
-
-DEVICE
-inline float modulo(float a, float b) {
-    float r = ::fmodf(a, b);
-    return (r < 0.0f) ? r+b : r;
-}
-
-DEVICE
-inline double modulo(double a, double b) {
-    double r = ::fmod(a, b);
-    return (r < 0.0) ? r+b : r;
-}
-
-template <typename T>
-DEVICE
-inline T max(const T &a, const T &b) {
-    return a > b ? a : b;
-}
-
-template <typename T>
-DEVICE
-inline T min(const T &a, const T &b) {
-    return a < b ? a : b;
-}
-
-/// Return ceil(x/y) for integers x and y
-inline int idiv_ceil(int x, int y) {
-    return (x + y-1) / y;
-}
-
-template <typename T>
-DEVICE
-inline void swap_(T &a, T &b) {
-    T tmp = a;
-    a = b;
-    b = tmp;
-}
-
-inline double log2(double x) {
-    return log(x) / log(Real(2));
-}
-
-template <typename T>
-DEVICE
-inline T safe_acos(const T &x) {
-    if (x >= 1) return T(0);
-    else if(x <= -1) return T(M_PI);
-    return acos(x);
-}
-
-// For Morton code computation. This can be made faster.
-DEVICE
-inline uint32_t expand_bits(uint32_t x) {
-    // Insert one zero after every bit given a 10-bit integer
-    constexpr uint64_t mask = 0x1u;
-    // We start from LSB (bit 31)
-    auto result = (x & (mask << 0u));
-    result |= ((x & (mask << 1u)) << 1u);
-    result |= ((x & (mask << 2u)) << 2u);
-    result |= ((x & (mask << 3u)) << 3u);
-    result |= ((x & (mask << 4u)) << 4u);
-    result |= ((x & (mask << 5u)) << 5u);
-    result |= ((x & (mask << 6u)) << 6u);
-    result |= ((x & (mask << 7u)) << 7u);
-    result |= ((x & (mask << 8u)) << 8u);
-    result |= ((x & (mask << 9u)) << 9u);
-    return result;
-}
-
-// DEVICE
-// inline int clz(uint64_t x) {
-// #ifdef __CUDA_ARCH__
-//     return __clzll(x);
-// #else
-//     // TODO: use _BitScanReverse in windows
-//     return x == 0 ? 64 : __builtin_clzll(x);
-// #endif
-// }
-
-// DEVICE
-// inline int ffs(uint8_t x) {
-// #ifdef __CUDA_ARCH__
-//     return __ffs(x);
-// #else
-//     // TODO: use _BitScanReverse in windows
-//     return __builtin_ffs(x);
-// #endif
-// }
-
-// DEVICE
-// inline int popc(uint8_t x) {
-// #ifdef __CUDA_ARCH__
-//     return __popc(x);
-// #else
-//     // TODO: use _popcnt in windows
-//     return __builtin_popcount(x);
-// #endif
-// }
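The `expand_bits` helper in the deleted diffvg.h spreads the low 10 bits of an integer so one zero sits between consecutive bits, the standard building block for 2D Morton (Z-order) codes. A direct Python port, plus the interleaving step it is presumably combined with (the `morton2d` pairing is our illustration; the header itself only defines `expand_bits`):

```python
def expand_bits(x):
    # port of the deleted expand_bits: bit i of x moves to position 2*i
    result = x & 1
    for i in range(1, 10):
        result |= (x & (1 << i)) << i
    return result

def morton2d(x, y):
    # interleave two 10-bit coordinates into one Morton (Z-order) code
    return expand_bits(x) | (expand_bits(y) << 1)

assert expand_bits(0b111) == 0b10101
assert morton2d(3, 5) == 0b100111  # bits from MSB: y2 x2 y1 x1 y0 x0
```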
spaces/CVPR/LIVE/thrust/thrust/device_malloc.h
DELETED
@@ -1,103 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-/*! \file device_malloc.h
- *  \brief Allocates storage in device memory
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/device_ptr.h>
-#include <cstddef> // for std::size_t
-
-namespace thrust
-{
-
-/*! \addtogroup allocation_functions Allocation Functions
- *  \ingroup memory_management_functions
- *  \{
- */
-
-/*! This version of \p device_malloc allocates sequential device storage
- *  for bytes.
- *
- *  \param n The number of bytes to allocate sequentially
- *           in device memory.
- *  \return A \p device_ptr to the newly allocated memory.
- *
- *  The following code snippet demonstrates how to use \p device_malloc to
- *  allocate a range of device memory.
- *
- *  \code
- *  #include <thrust/device_malloc.h>
- *  #include <thrust/device_free.h>
- *  ...
- *  // allocate some memory with device_malloc
- *  const int N = 100;
- *  thrust::device_ptr<void> void_ptr = thrust::device_malloc(N);
- *
- *  // manipulate memory
- *  ...
- *
- *  // deallocate with device_free
- *  thrust::device_free(void_ptr);
- *  \endcode
- *
- *  \see device_ptr
- *  \see device_free
- */
-inline thrust::device_ptr<void> device_malloc(const std::size_t n);
-
-/*! This version of \p device_malloc allocates sequential device storage for
- *  new objects of the given type.
- *
- *  \param n The number of objects of type T to allocate
- *           sequentially in device memory.
- *  \return A \p device_ptr to the newly allocated memory.
- *
- *  The following code snippet demonstrates how to use \p device_malloc to
- *  allocate a range of device memory.
- *
- *  \code
- *  #include <thrust/device_malloc.h>
- *  #include <thrust/device_free.h>
- *  ...
- *  // allocate some integers with device_malloc
- *  const int N = 100;
- *  thrust::device_ptr<int> int_array = thrust::device_malloc<int>(N);
- *
- *  // manipulate integers
- *  ...
- *
- *  // deallocate with device_free
- *  thrust::device_free(int_array);
- *  \endcode
- *
- *  \see device_ptr
- *  \see device_free
- */
-template<typename T>
-inline thrust::device_ptr<T> device_malloc(const std::size_t n);
-
-/*! \}
- */
-
-} // end thrust
-
-#include <thrust/detail/device_malloc.inl>
-
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/select_system.h
DELETED
@@ -1,125 +0,0 @@
-
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/execution_policy.h>
-#include <thrust/detail/type_traits.h>
-#include <thrust/iterator/detail/minimum_system.h>
-#include <thrust/iterator/detail/device_system_tag.h>
-#include <thrust/iterator/detail/any_system_tag.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-template<typename Tag>
-  struct select_system1_exists;
-
-template<typename Tag1, typename Tag2>
-  struct select_system2_exists;
-
-template<typename Tag1, typename Tag2, typename Tag3>
-  struct select_system3_exists;
-
-template<typename Tag1, typename Tag2, typename Tag3, typename Tag4>
-  struct select_system4_exists;
-
-template<typename Tag1, typename Tag2, typename Tag3, typename Tag4, typename Tag5>
-  struct select_system5_exists;
-
-template<typename Tag1, typename Tag2, typename Tag3, typename Tag4, typename Tag5, typename Tag6>
-  struct select_system6_exists;
-
-template<typename System>
-__host__ __device__
-  typename thrust::detail::disable_if<
-    select_system1_exists<System>::value,
-    System &
-  >::type
-    select_system(thrust::execution_policy<System> &system);
-
-template<typename System1, typename System2>
-__host__ __device__
-  typename thrust::detail::enable_if_defined<
-    thrust::detail::minimum_system<System1,System2>
-  >::type
-    &select_system(thrust::execution_policy<System1> &system1,
-                   thrust::execution_policy<System2> &system2);
-
-template<typename System1, typename System2, typename System3>
-__host__ __device__
-  typename thrust::detail::lazy_disable_if<
-    select_system3_exists<System1,System2,System3>::value,
-    thrust::detail::minimum_system<System1,System2,System3>
-  >::type
-    &select_system(thrust::execution_policy<System1> &system1,
-                   thrust::execution_policy<System2> &system2,
-                   thrust::execution_policy<System3> &system3);
-
-template<typename System1, typename System2, typename System3, typename System4>
-__host__ __device__
-  typename thrust::detail::lazy_disable_if<
-    select_system4_exists<System1,System2,System3,System4>::value,
-    thrust::detail::minimum_system<System1,System2,System3,System4>
-  >::type
-    &select_system(thrust::execution_policy<System1> &system1,
-                   thrust::execution_policy<System2> &system2,
-                   thrust::execution_policy<System3> &system3,
-                   thrust::execution_policy<System4> &system4);
-
-template<typename System1, typename System2, typename System3, typename System4, typename System5>
-__host__ __device__
-  typename thrust::detail::lazy_disable_if<
-    select_system5_exists<System1,System2,System3,System4,System5>::value,
-    thrust::detail::minimum_system<System1,System2,System3,System4,System5>
-  >::type
-    &select_system(thrust::execution_policy<System1> &system1,
-                   thrust::execution_policy<System2> &system2,
-                   thrust::execution_policy<System3> &system3,
-                   thrust::execution_policy<System4> &system4,
-                   thrust::execution_policy<System5> &system5);
-
-template<typename System1, typename System2, typename System3, typename System4, typename System5, typename System6>
-__host__ __device__
-  typename thrust::detail::lazy_disable_if<
-    select_system6_exists<System1,System2,System3,System4,System5,System6>::value,
-    thrust::detail::minimum_system<System1,System2,System3,System4,System5,System6>
-  >::type
-    &select_system(thrust::execution_policy<System1> &system1,
-                   thrust::execution_policy<System2> &system2,
-                   thrust::execution_policy<System3> &system3,
-                   thrust::execution_policy<System4> &system4,
-                   thrust::execution_policy<System5> &system5,
-                   thrust::execution_policy<System6> &system6);
-
-// Map a single any_system_tag to device_system_tag.
-inline __host__ __device__
-thrust::device_system_tag select_system(thrust::any_system_tag);
-
-} // end generic
-} // end detail
-} // end system
-} // end thrust
-
-#include <thrust/system/detail/generic/select_system.inl>
spaces/CVPR/regionclip-demo/detectron2/data/datasets/coco_panoptic.py
DELETED
@@ -1,228 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import copy
-import json
-import os
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-from detectron2.utils.file_io import PathManager
-
-from .coco import load_coco_json, load_sem_seg
-
-__all__ = ["register_coco_panoptic", "register_coco_panoptic_separated"]
-
-
-def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta):
-    """
-    Args:
-        image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
-        gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
-        json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
-
-    Returns:
-        list[dict]: a list of dicts in Detectron2 standard format. (See
-        `Using Custom Datasets </tutorials/datasets.html>`_ )
-    """
-
-    def _convert_category_id(segment_info, meta):
-        if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
-            segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
-                segment_info["category_id"]
-            ]
-            segment_info["isthing"] = True
-        else:
-            segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
-                segment_info["category_id"]
-            ]
-            segment_info["isthing"] = False
-        return segment_info
-
-    with PathManager.open(json_file) as f:
-        json_info = json.load(f)
-
-    ret = []
-    for ann in json_info["annotations"]:
-        image_id = int(ann["image_id"])
-        # TODO: currently we assume image and label has the same filename but
-        # different extension, and images have extension ".jpg" for COCO. Need
-        # to make image extension a user-provided argument if we extend this
-        # function to support other COCO-like datasets.
-        image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
-        label_file = os.path.join(gt_dir, ann["file_name"])
-        segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
-        ret.append(
-            {
-                "file_name": image_file,
-                "image_id": image_id,
-                "pan_seg_file_name": label_file,
-                "segments_info": segments_info,
-            }
-        )
-    assert len(ret), f"No images found in {image_dir}!"
-    assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
-    assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
-    return ret
-
-
-def register_coco_panoptic(
-    name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None
-):
-    """
-    Register a "standard" version of COCO panoptic segmentation dataset named `name`.
-    The dictionaries in this registered dataset follows detectron2's standard format.
-    Hence it's called "standard".
-
-    Args:
-        name (str): the name that identifies a dataset,
-            e.g. "coco_2017_train_panoptic"
-        metadata (dict): extra metadata associated with this dataset.
-        image_root (str): directory which contains all the images
-        panoptic_root (str): directory which contains panoptic annotation images in COCO format
-        panoptic_json (str): path to the json panoptic annotation file in COCO format
-        sem_seg_root (none): not used, to be consistent with
-            `register_coco_panoptic_separated`.
-        instances_json (str): path to the json instance annotation file
-    """
-    panoptic_name = name
-    DatasetCatalog.register(
-        panoptic_name,
-        lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata),
-    )
-    MetadataCatalog.get(panoptic_name).set(
-        panoptic_root=panoptic_root,
-        image_root=image_root,
-        panoptic_json=panoptic_json,
-        json_file=instances_json,
-        evaluator_type="coco_panoptic_seg",
-        ignore_label=255,
-        label_divisor=1000,
-        **metadata,
-    )
-
-
-def register_coco_panoptic_separated(
-    name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
-):
-    """
-    Register a "separated" version of COCO panoptic segmentation dataset named `name`.
-    The annotations in this registered dataset will contain both instance annotations and
-    semantic annotations, each with its own contiguous ids. Hence it's called "separated".
-
-    It follows the setting used by the PanopticFPN paper:
-
-    1. The instance annotations directly come from polygons in the COCO
-       instances annotation task, rather than from the masks in the COCO panoptic annotations.
-
-       The two format have small differences:
-       Polygons in the instance annotations may have overlaps.
-       The mask annotations are produced by labeling the overlapped polygons
-       with depth ordering.
-
-    2. The semantic annotations are converted from panoptic annotations, where
-       all "things" are assigned a semantic id of 0.
-       All semantic categories will therefore have ids in contiguous
-       range [1, #stuff_categories].
-
-    This function will also register a pure semantic segmentation dataset
-    named ``name + '_stuffonly'``.
-
-    Args:
-        name (str): the name that identifies a dataset,
-            e.g. "coco_2017_train_panoptic"
-        metadata (dict): extra metadata associated with this dataset.
-        image_root (str): directory which contains all the images
-        panoptic_root (str): directory which contains panoptic annotation images
-        panoptic_json (str): path to the json panoptic annotation file
-        sem_seg_root (str): directory which contains all the ground truth segmentation annotations.
-        instances_json (str): path to the json instance annotation file
-    """
-    panoptic_name = name + "_separated"
-    DatasetCatalog.register(
-        panoptic_name,
-        lambda: merge_to_panoptic(
-            load_coco_json(instances_json, image_root, panoptic_name),
-            load_sem_seg(sem_seg_root, image_root),
-        ),
-    )
-    MetadataCatalog.get(panoptic_name).set(
-        panoptic_root=panoptic_root,
-        image_root=image_root,
-        panoptic_json=panoptic_json,
-        sem_seg_root=sem_seg_root,
-        json_file=instances_json,  # TODO rename
-        evaluator_type="coco_panoptic_seg",
-        ignore_label=255,
-        **metadata,
-    )
-
-    semantic_name = name + "_stuffonly"
-    DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root))
-    MetadataCatalog.get(semantic_name).set(
-        sem_seg_root=sem_seg_root,
-        image_root=image_root,
-        evaluator_type="sem_seg",
-        ignore_label=255,
-        **metadata,
-    )
-
-
-def merge_to_panoptic(detection_dicts, sem_seg_dicts):
-    """
-    Create dataset dicts for panoptic segmentation, by
-    merging two dicts using "file_name" field to match their entries.
-
-    Args:
-        detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation.
-        sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation.
-
-    Returns:
-        list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in
-            both detection_dicts and sem_seg_dicts that correspond to the same image.
-            The function assumes that the same key in different dicts has the same value.
-    """
-    results = []
-    sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts}
-    assert len(sem_seg_file_to_entry) > 0
-
-    for det_dict in detection_dicts:
-        dic = copy.copy(det_dict)
-        dic.update(sem_seg_file_to_entry[dic["file_name"]])
-        results.append(dic)
-    return results
-
-
-if __name__ == "__main__":
-    """
-    Test the COCO panoptic dataset loader.
-
-    Usage:
-        python -m detectron2.data.datasets.coco_panoptic \
-            path/to/image_root path/to/panoptic_root path/to/panoptic_json dataset_name 10
-
-        "dataset_name" can be "coco_2017_train_panoptic", or other
-        pre-registered ones
-    """
-    from detectron2.utils.logger import setup_logger
-    from detectron2.utils.visualizer import Visualizer
-    import detectron2.data.datasets  # noqa # add pre-defined metadata
-    import sys
-    from PIL import Image
-    import numpy as np
-
-    logger = setup_logger(name=__name__)
-    assert sys.argv[4] in DatasetCatalog.list()
-    meta = MetadataCatalog.get(sys.argv[4])
-
-    dicts = load_coco_panoptic_json(sys.argv[3], sys.argv[1], sys.argv[2], meta.as_dict())
-    logger.info("Done loading {} samples.".format(len(dicts)))
-
-    dirname = "coco-data-vis"
-    os.makedirs(dirname, exist_ok=True)
-    num_imgs_to_vis = int(sys.argv[5])
-    for i, d in enumerate(dicts):
-        img = np.array(Image.open(d["file_name"]))
-        visualizer = Visualizer(img, metadata=meta)
-        vis = visualizer.draw_dataset_dict(d)
-        fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
-        vis.save(fpath)
-        if i + 1 >= num_imgs_to_vis:
-            break
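
For context, the deleted loader above is driven through `register_coco_panoptic`. A minimal usage sketch follows, in the spirit of its docstrings; the dataset name, the `~/coco` paths, and the toy id-mapping entries are hypothetical stand-ins, not values from this repo:

from detectron2.data import DatasetCatalog
from detectron2.data.datasets.coco_panoptic import register_coco_panoptic

# The loader reads these two remapping tables from `metadata` at load time;
# real values come from the dataset's category list (toy entries shown here).
metadata = {
    "thing_dataset_id_to_contiguous_id": {1: 0},
    "stuff_dataset_id_to_contiguous_id": {92: 1},
}

register_coco_panoptic(
    "my_coco_panoptic_train",  # hypothetical dataset name
    metadata,
    image_root="~/coco/train2017",
    panoptic_root="~/coco/panoptic_train2017",
    panoptic_json="~/coco/annotations/panoptic_train2017.json",
    instances_json="~/coco/annotations/instances_train2017.json",
)

# Once registered, the dataset dicts are produced lazily by name:
dicts = DatasetCatalog.get("my_coco_panoptic_train")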
spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/twitter.py
DELETED
@@ -1,26 +0,0 @@
-import os
-
-import tweepy
-from dotenv import load_dotenv
-
-load_dotenv()
-
-
-def send_tweet(tweet_text):
-    consumer_key = os.environ.get("TW_CONSUMER_KEY")
-    consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
-    access_token = os.environ.get("TW_ACCESS_TOKEN")
-    access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET")
-    # Authenticate to Twitter
-    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
-    auth.set_access_token(access_token, access_token_secret)
-
-    # Create API object
-    api = tweepy.API(auth)
-
-    # Send tweet
-    try:
-        api.update_status(tweet_text)
-        print("Tweet sent successfully!")
-    except tweepy.TweepyException as e:
-        print("Error sending tweet: {}".format(e.reason))
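
A quick usage sketch for the deleted `send_tweet` command; it assumes the four `TW_*` credentials are exported in the environment (or live in the `.env` file that `load_dotenv()` reads), and the module path mirrors the deleted file's location:

import os

# Placeholder credentials for illustration only; real values belong in .env.
for key in ("TW_CONSUMER_KEY", "TW_CONSUMER_SECRET",
            "TW_ACCESS_TOKEN", "TW_ACCESS_TOKEN_SECRET"):
    os.environ.setdefault(key, "replace-me")

from autogpt.commands.twitter import send_tweet

send_tweet("Hello from AutoGPT!")  # prints success, or the tweepy error reason

Note that `api.update_status` is a Twitter API v1.1 call; under tweepy's v2 client the equivalent would be `tweepy.Client.create_tweet`.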
spaces/CikeyQI/Yunzai/Yunzai/lib/config/check.js
DELETED
@@ -1,32 +0,0 @@
-import fs from 'fs'
-import { createRequire } from 'module'
-const require = createRequire(import.meta.url)
-const { exec } = require('child_process')
-
-export async function checkRun () {
-  if (process.argv[1].includes('pm2')) return
-  if (process.argv[1].includes('test')) return
-
-  let cfg = pm2Cfg()
-  let status = await execSync(`pm2 show ${cfg.apps[0].name}`)
-
-  if (status.stdout.includes('online')) {
-    logger.mark('检测到后台正在运行')
-    logger.mark('已停止后台进程,防止重复运行')
-    execSync(`pm2 stop ${cfg.apps[0].name}`)
-  }
-}
-
-async function execSync (cmd) {
-  return new Promise((resolve, reject) => {
-    exec(cmd, (error, stdout, stderr) => {
-      resolve({ error, stdout, stderr })
-    })
-  })
-}
-
-function pm2Cfg () {
-  let cfg = fs.readFileSync('config/pm2/pm2.json')
-  cfg = JSON.parse(cfg)
-  return cfg
-}
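
The deleted check reads the pm2 app name from `config/pm2/pm2.json`, asks `pm2 show` whether that app is already online, and stops it if so (the two Chinese log lines say exactly that: a running background process was detected and has been stopped to prevent a duplicate run). A Python re-sketch of the same guard, for illustration only; it is not part of Yunzai:

import json
import subprocess

def check_run(pm2_json="config/pm2/pm2.json"):
    # Read the pm2 app name from the same config file the JS version uses.
    with open(pm2_json) as f:
        name = json.load(f)["apps"][0]["name"]
    # `pm2 show <name>` reports "online" when the app is already running.
    status = subprocess.run(["pm2", "show", name], capture_output=True, text=True)
    if "online" in status.stdout:
        print("Detected a running background process; stopping it to prevent a duplicate run.")
        subprocess.run(["pm2", "stop", name])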