Commit 6e31c22
Parent(s): 0e4bcc1

Update parquet files (step 23 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aria Band Parde Awal Mp3 Download.md +0 -26
- spaces/1gistliPinn/ChatGPT4/Examples/Athlean X Xero Pdf Download [BETTER].md +0 -46
- spaces/1gistliPinn/ChatGPT4/Examples/COD Black Ops II [ALL DLC Multi5 Crack In] The Game.md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Database Workbench Pro 5.3.2.176 Crack.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Download Game Imperial Glory PORTABLE Full Version.md +0 -30
- spaces/1phancelerku/anime-remove-background/Create Your Own Custom Monsters and Join the Fight in Animal Revolt Battle Simulator on PC.md +0 -163
- spaces/1phancelerku/anime-remove-background/Download Pear Live and Discover the Best Live Streaming Content for Adults.md +0 -131
- spaces/232labs/VToonify/vtoonify/model/stylegan/model.py +0 -719
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_mbf.py +0 -26
- spaces/AIConsultant/MusicGen/audiocraft/models/__init__.py +0 -18
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/dataset.py +0 -147
- spaces/ALR03/gradiolangchainChatbotOpenAI/README.md +0 -12
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192.py +0 -172
- spaces/AUBADA-ALARABI/poetry20233/README.md +0 -13
- spaces/Abhilashvj/planogram-compliance/utils/segment/general.py +0 -190
- spaces/Adesoji1/Panel_PDF_QA/README.md +0 -11
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/restorabledata-plugin.d.ts +0 -10
- spaces/Amrrs/DragGan-Inversion/stylegan_human/edit/__init__.py +0 -3
- spaces/Andres99/Tune-A-Video-Training-UI/utils.py +0 -65
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/spectrogram_diffusion.md +0 -37
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/latent_upscale.md +0 -38
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/other-formats.md +0 -191
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py +0 -164
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/unet_2d.py +0 -329
- spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/evaluations/inception.py +0 -322
- spaces/Aravindsssss/GradiolangchainChatBoatOpenAI/README.md +0 -12
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/certifi/__main__.py +0 -12
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/wait.py +0 -152
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/bdist_rpm.py +0 -40
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/builtin.py +0 -259
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_engine.py +0 -186
- spaces/Benson/text-generation/Examples/Baixar Pou Reggae Apk.md +0 -108
- spaces/Benson/text-generation/Examples/Bajar Deh Cancin Descargar Mp3 Pagalworld Ringtone.md +0 -77
- spaces/Benson/text-generation/Examples/Descargar Gratis Poppy Playtime.md +0 -66
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/specifiers.py +0 -802
- spaces/BigSalmon/TestAnyGPTModel/app.py +0 -67
- spaces/CALM/Dashboard/perso/get_usernames.py +0 -14
- spaces/CVPR/LIVE/thrust/thrust/uninitialized_copy.h +0 -303
- spaces/CVPR/monoscene_lite/monoscene/flosp.py +0 -41
- spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/misc.py +0 -717
- spaces/Chenyuwen/playground/app.py +0 -230
- spaces/CjangCjengh/Sanskrit-TTS/mel_processing.py +0 -101
- spaces/CognitiveLabs/GPT-4-Vision-Chat/langsmith_config.py +0 -8
- spaces/CompVis/stable-diffusion-license/app.py +0 -14
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_core/_subprocesses.py +0 -135
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-f2292b12.css +0 -1
- spaces/DaFujaTyping/hf-Chat-ui/src/hooks.server.ts +0 -72
- spaces/Daniton/MagicPrompt-Stable-Diffusion/README.md +0 -14
- spaces/DataScienceEngineering/6-TreemapAndSunburst/app.py +0 -230
- spaces/DeclK/pose/model_zoo/rtmdet/rtmdet_tiny_8xb32-300e_coco/detection_onnxruntime_static.py +0 -23
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aria Band Parde Awal Mp3 Download.md
DELETED
@@ -1,26 +0,0 @@
-
-<h1>How to Download Aria Band's Parde Awal Mp3 for Free</h1>
-<p>If you are a fan of Aria Band, you might be looking for a way to download their latest single, Parde Awal, in mp3 format for free. Parde Awal is a catchy and upbeat song that showcases the band's talent and style. It is one of the most popular songs by Aria Band, a group of Afghan singers and musicians who perform traditional and modern music.</p>
-<h2>Aria Band Parde Awal Mp3 Download</h2><br /><p><b><b>DOWNLOAD</b> »»» <a href="https://byltly.com/2uKAcQ">https://byltly.com/2uKAcQ</a></b></p><br /><br />
-<p>Parde Awal was released on December 16, 2019 by Aria Band and is available on various streaming platforms such as Spotify, Shazam, and Qobuz[^2^] [^1^] [^3^]. However, if you want to download the song in mp3 format for free, you might have some difficulty finding a reliable and legal source. That's why we have prepared this guide to help you download Aria Band's Parde Awal mp3 for free without any hassle.</p>
-<h2>Step 1: Find a reputable website that offers free mp3 downloads</h2>
-<p>The first step to download Aria Band's Parde Awal mp3 for free is to find a website that offers free mp3 downloads of songs that are not protected by copyright. There are many websites that claim to offer free mp3 downloads, but some of them might be unsafe, illegal, or low-quality. Therefore, you need to be careful and do some research before choosing a website.</p>
-<p>One way to find a reputable website is to use Bing as your search engine and type in "Aria Band Parde Awal Mp3 Download" in the search box. Bing will show you a list of websites that match your query and rank them according to their relevance and quality. You can also use Bing's filters and tools to narrow down your search results by date, language, region, or file type.</p>
-<p>Another way to find a reputable website is to look for reviews and ratings from other users who have downloaded the song before. You can check out online forums, blogs, social media platforms, or comments sections where people share their experiences and opinions about different websites. You can also ask your friends or family members who are fans of Aria Band for recommendations.</p>
-<p></p>
-<h2>Step 2: Download the song in mp3 format</h2>
-<p>Once you have found a website that offers free mp3 downloads of Aria Band's Parde Awal, you can proceed to download the song in mp3 format. The exact steps might vary depending on the website, but generally, you need to follow these steps:</p>
-<ul>
-<li>Click on the link or button that says "Download" or "Download Mp3" next to the song title.</li>
-<li>Choose a location on your device where you want to save the file.</li>
-<li>Wait for the download to complete.</li>
-<li>Enjoy listening to Aria Band's Parde Awal mp3 on your device.</li>
-</ul>
-<h2>Step 3: Check the quality and legality of the downloaded file</h2>
-<p>The final step to download Aria Band's Parde Awal mp3 for free is to check the quality and legality of the downloaded file. You want to make sure that the file is not corrupted, infected with malware, or infringing any copyright laws. Here are some tips to check the quality and legality of the downloaded file:</p>
-<ul>
-<li>Check the file size and duration of the mp3 file. A typical mp3 file of a song should be around 3-5 MB and 3-5 minutes long. If the file size or duration is too small or too large, it might indicate that the file is incomplete, damaged, or modified.</li>
-<li>Check the audio quality of the mp3 file. You can use an audio player or editor software to play and analyze the sound quality of the file. You can also compare it with the original version of the song on streaming platforms or YouTube. If the audio quality is poor, distorted, or different from the original version, it might indicate that the file is low-quality or altered.</li>
-<li>Check the legal status of the mp3 file. You can use an</p> cec2833e83<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Athlean X Xero Pdf Download [BETTER].md
DELETED
@@ -1,46 +0,0 @@
-
-<h1>Athlean X Xero Review: A Bodyweight Workout Program for Muscle and Fat Loss</h1>
-<p>If you are looking for a bodyweight workout program that can help you build muscle and lose fat without any equipment, you might have come across Athlean X Xero. This is a 90-day program created by Jeff Cavaliere, a physical therapist and strength coach who runs the popular YouTube channel Athlean-X. But is this program worth your time and money? In this article, we will review Athlean X Xero and see what it has to offer.</p>
-<h2>What is Athlean X Xero?</h2>
-<p>Athlean X Xero is a bodyweight workout program that claims to deliver results with absolutely no equipment. It consists of six weeks of workouts, each with three training days and one challenge day. The workouts are designed to challenge your strength, endurance, power, agility, and core stability using various bodyweight exercises. The challenge days are meant to test your progress and push you to your limits.</p>
-<h2>athlean x xero pdf download</h2><br /><p><b><b>DOWNLOAD</b> ✦✦✦ <a href="https://imgfil.com/2uxYrZ">https://imgfil.com/2uxYrZ</a></b></p><br /><br />
-<h2>What are the benefits of Athlean X Xero?</h2>
-<p>Some of the benefits of Athlean X Xero are:</p>
-<ul>
-<li>It can be done anywhere, anytime, with no equipment needed.</li>
-<li>It can help you build muscle and lose fat by increasing your metabolic rate and stimulating muscle growth.</li>
-<li>It can improve your athletic performance by enhancing your mobility, coordination, balance, and explosiveness.</li>
-<li>It can prevent injuries by strengthening your joints, tendons, and ligaments.</li>
-<li>It can keep you motivated and engaged by providing variety and challenge in every workout.</li>
-</ul>
-<h2>What are the drawbacks of Athlean X Xero?</h2>
-<p>Some of the drawbacks of Athlean X Xero are:</p>
-<ul>
-<li>It can be too difficult for beginners or too easy for advanced trainees, depending on your fitness level.</li>
-<li>It can be hard to measure your progress without any weights or equipment.</li>
-<li>It can be boring or repetitive for some people who prefer more diversity in their training.</li>
-<li>It can be expensive compared to other bodyweight workout programs available online.</li>
-</ul>
-<h2>Is Athlean X Xero worth it?</h2>
-<p>Athlean X Xero is a pretty decent bodyweight workout program if you're low on equipment, just starting your fitness journey, or have both muscle gain and fat loss goals. You can be sure that each workout is unique and interesting, choose a difficulty that matches your current fitness level, use no equipment, and finish this grueling program in 90 days[^2^]. However, if you are looking for a free or cheaper alternative, or if you have access to a gym or some basic equipment, you might want to consider other options. Ultimately, the best workout program is the one that suits your needs, preferences, and goals.</p><h2>How to get started with Athlean X Xero?</h2>
-<p>If you are interested in trying Athlean X Xero, you can purchase the program from the official website. You will get access to the digital PDF guide, which contains all the instructions, exercises, and schedules for the 90-day program. You will also get access to the online portal, where you can watch the video demonstrations of each exercise, track your progress, and interact with other users. The program costs $79.95, which is a one-time payment with no recurring fees or subscriptions.</p>
-<h2>What are some tips for succeeding with Athlean X Xero?</h2>
-<p>Athlean X Xero is not an easy program to follow. It requires dedication, discipline, and hard work. Here are some tips to help you get the most out of it:</p>
-<ul>
-<li>Follow the program as prescribed. Don't skip any workouts, challenges, or rest days.</li>
-<li>Choose the right difficulty level for you. Don't overestimate or underestimate your abilities.</li>
-<li>Warm up properly before each workout. This will prevent injuries and improve your performance.</li>
-<li>Cool down and stretch after each workout. This will reduce soreness and speed up your recovery.</li>
-<li>Eat a balanced and nutritious diet. This will fuel your body and support your muscle growth and fat loss.</li>
-<li>Drink plenty of water. This will keep you hydrated and prevent dehydration.</li>
-<li>Get enough sleep. This will allow your body and mind to rest and recover.</li>
-<li>Be consistent and patient. Don't expect to see results overnight. Trust the process and enjoy the journey.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Athlean X Xero is a bodyweight workout program that can help you build muscle and lose fat without any equipment. It is created by Jeff Cavaliere, a physical therapist and strength coach who runs the popular YouTube channel Athlean-X. The program consists of six weeks of workouts, each with three training days and one challenge day. The workouts are designed to challenge your strength, endurance, power, agility, and core stability using various bodyweight exercises. The challenge days are meant to test your progress and push you to your limits.</p>
-<p></p>
-<p>Athlean X Xero has some benefits and drawbacks that you should consider before buying it. It can be done anywhere, anytime, with no equipment needed. It can help you build muscle and lose fat by increasing your metabolic rate and stimulating muscle growth. It can improve your athletic performance by enhancing your mobility, coordination, balance, and explosiveness. It can prevent injuries by strengthening your joints, tendons, and ligaments. It can keep you motivated and engaged by providing variety and challenge in every workout.</p>
-<p>However, it can also be too difficult for beginners or too easy for advanced trainees, depending on your fitness level. It can be hard to measure your progress without any weights or equipment. It can be boring or repetitive for some people who prefer more diversity in their training. It can be expensive compared to other bodyweight workout programs available online.</p>
-<p>If you are looking for a bodyweight workout program that can help you build muscle and lose fat without any equipment, Athlean X Xero might be a good option for you. However, if you are looking for a free or cheaper alternative, or if you have access to a gym or some basic equipment, you might want to consider other options. Ultimately, the best workout program is the one that suits your needs, preferences, and goals.</p> d5da3c52bf<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/COD Black Ops II [ALL DLC Multi5 Crack In] The Game.md
DELETED
@@ -1,9 +0,0 @@
-<br />
-<p>As time passes, vassals grow restless and unrest on the continent grows until a larger war is spurred upon to resolve the matter. You then scramble to build up your military to aid you in the war. Civilizations are destroyed, roads are built, and people are indoctrinated into various religions. The graphical flourishes that came along with Black & White's initial hype have long since waned, replaced by large amounts of low-grade placeholder assets. The game's only features are destructible environments and a vast number of pedestrian villagers. Combined, these give the game an apparently limited scope, but left little room for improvement.</p>
-<h2>COD Black Ops II [ALL DLC, multi5, crack in] the game</h2><br /><p><b><b>Download</b> » <a href="https://imgfil.com/2uy0Tc">https://imgfil.com/2uy0Tc</a></b></p><br /><br />
-<p>The game's single defining feature is its complex AI system. This AI is unique to Black & White and is built on a heavily layered decision-making system, one that has to be triggered by multiple events, and in response to its own decisions. Essentially, Black & White is the game that proves that tactical AI is possible in a large scale sandbox RTS. It is an extremely hard and difficult system to build in the short time limit that Black & White places on its player, resulting in a very short game, despite its ambitious framework.</p>
-<p>Each country is defined by a set of values that influence the political standing of its subjects, so that some countries might rely on their culture while others focus on science. This is then represented in their interactions with their subjects, who will become more or less loyal to the values of their countries.</p>
-<p>The graphical load times and performance is also of the very high standard expected from us. In fact, should you find your PC unable to run this game properly, contact us and we'll try to get your PC fixed for you (http://www.NovaLogic.com).</p>
-<p></p> 899543212b<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Database Workbench Pro 5.3.2.176 Crack.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Database Workbench Pro 5.3.2.176 Crack</h2><br /><p><b><b>DOWNLOAD</b> ☆ <a href="https://imgfil.com/2uxYg0">https://imgfil.com/2uxYg0</a></b></p><br /><br />
-
-aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download Game Imperial Glory PORTABLE Full Version.md
DELETED
@@ -1,30 +0,0 @@
-<h2>download game imperial glory full version</h2><br /><p><b><b>DOWNLOAD</b> ↔ <a href="https://imgfil.com/2uy1zm">https://imgfil.com/2uy1zm</a></b></p><br /><br />
-
-The Prussian King is on a quest to explore the Americas. On his way back to Germany, he stops over in France, where he is greeted by a young Prince of the House of Orleans. In a world of towering European capitals, in a domain of large states where the most brutal politics reign, a few armies, a few men and a few ideas form the basis of the reality. It is here, in these humble mountains of Auvergne, that the world changes forever.
-
-■Full Version is at GameFAQs.com.
-
-■"Table of Contents" is in the PC version's readme.
-
-■Legend
-
-Legend of the Five Rings is a turn-based tactical RPG. The game features gorgeous hand-drawn 2D graphics, three tactical player classes (Rulers, Masters and Adepts), a wide variety of units, weapons and items, a compelling storyline, complex strategic gameplay, interesting maps, and a focus on realism. The game is set in feudal Japan with a 'Rinkei' system. Players begin the game in a clanless status, but through careful planning, can rise through the ranks of the game's clan tree to become a samurai.
-
-Stories are told through a series of battles, which are accompanied by musical interludes and animations. It is a game focused on the art of war, where players will be challenged by the game's difficulty, and they can choose to play a single character, play multiple characters in tandem or change characters in battle with others.
-
-The Legend of the Five Rings game is unique in the sense that the battle system allows for a high degree of freedom within a highly calculated system. This means that the players can choose to follow the most logical, powerful, efficient route to victory, or instead, they can play with chance by rolling dice at the start of each battle.
-
-■Dedicated Server
-
-■PC Game
-
-■Have you ever wanted to create your own clan? Become a Rulers in your own house? Gather a party and learn your own skills in Legend of the Five Rings? Then, it is time to build your own army with the PC version!
-
-The PC version includes options for dedicated server: LAN server and online server. Dedicated server can be accessed through the Options menu of the PC version.
-
-■Support for Dedicated Server
-
-■Graphical Changes 4fefd39f24<br />
-<br />
-<br />
-<p></p>
spaces/1phancelerku/anime-remove-background/Create Your Own Custom Monsters and Join the Fight in Animal Revolt Battle Simulator on PC.md
DELETED
@@ -1,163 +0,0 @@
-
-<h1>Animal Revolt Battle Simulator: A Physics-Based Sandbox Game</h1>
-<p>Have you ever wondered what would happen if you put a T-rex against a dragon, or a shark against a gorilla, or a goat against a tank? If you have, then you will love <strong>Animal Revolt Battle Simulator</strong>, a physics-based sandbox game where you can create funny battles between all sorts of ragdoll creatures!</p>
-<p>In this game, you can build your own maps or pick from a selection of ready-made ones, place up to seven opposing armies made of different types of beasts, and watch them tear each other apart in an epic battle! You can also join the fight yourself in the first-person mode and blow the enemy away with some powerful guns!</p>
-<h2>animal revolt battle simulator download 2021 pc</h2><br /><p><b><b>Download</b> 🗸🗸🗸 <a href="https://jinyurl.com/2uNP22">https://jinyurl.com/2uNP22</a></b></p><br /><br />
-<p>You will have a huge selection of more than 70 creatures to pick from, ranging from ancient dinosaurs, mythical dragons, aquatic animals, jungle animals, and many more. You can also create your own custom monsters by combining different body parts and weapons. You can even download and upload user-generated content from the workshop.</p>
-<p>If you are looking for a challenge, you can also try the campaign mode, where you have to pick the right beasts, place them in the right place, and command them to defeat the enemy. The most expensive army doesn't always win. You have to learn the strengths and weaknesses of each creature type and use them wisely on the battlefield.</p>
-<p>Animal Revolt Battle Simulator is a game that will make you laugh, scream, and cheer as you watch the hilarious physics-based combat unfold. It is a game that will unleash your creativity and imagination as you make your own monsters and battles. It is a game that will test your strategy and tactics as you face different scenarios and enemies.</p>
-<p>If you are interested in playing this game, read on to find out how to download it for PC in 2021, how to play it, what features it offers, and some tips and tricks to help you get started.</p>
-<h2>How to Download Animal Revolt Battle Simulator for PC in 2021</h2>
-<p>If you want to play Animal Revolt Battle Simulator on your PC, you have two options. You can either visit the official website of the game or go to its Steam store page. Here are the steps to download the game from either source:</p>
-<h3>Step 1: Visit the official website or Steam store page of the game</h3>
-<p>The official website of Animal Revolt Battle Simulator is <a href="">https://animalrevoltbattlesimulator.com/</a>. Here you can find more information about the game, such as its features, screenshots, videos, and reviews. You can also subscribe to the newsletter to get updates and news about the game.</p>
-<p>animal revolt battle simulator pc game download<br />
-how to download animal revolt battle simulator on pc<br />
-animal revolt battle simulator free download for pc<br />
-animal revolt battle simulator steam download pc<br />
-animal revolt battle simulator full version download pc<br />
-animal revolt battle simulator download windows 10 pc<br />
-animal revolt battle simulator bluestacks download pc<br />
-animal revolt battle simulator mumu player download pc<br />
-animal revolt battle simulator official download pc<br />
-animal revolt battle simulator cracked download pc<br />
-animal revolt battle simulator torrent download pc<br />
-animal revolt battle simulator latest update download pc<br />
-animal revolt battle simulator online play on pc<br />
-animal revolt battle simulator offline mode pc download<br />
-animal revolt battle simulator system requirements for pc<br />
-animal revolt battle simulator gameplay on pc<br />
-animal revolt battle simulator review for pc<br />
-animal revolt battle simulator tips and tricks pc<br />
-animal revolt battle simulator cheats and hacks pc<br />
-animal revolt battle simulator mods and custom units pc<br />
-animal revolt battle simulator best units and maps pc<br />
-animal revolt battle simulator sandbox mode on pc<br />
-animal revolt battle simulator campaign mode on pc<br />
-animal revolt battle simulator first-person mode on pc<br />
-animal revolt battle simulator unit creator on pc<br />
-animal revolt battle simulator dinosaurs and monsters pc<br />
-animal revolt battle simulator ragdoll physics on pc<br />
-animal revolt battle simulator realistic battles on pc<br />
-animal revolt battle simulator funny moments on pc<br />
-animal revolt battle simulator epic wars on pc<br />
-animal revolt battle simulator vs totally accurate battle simulator pc<br />
-animal revolt battle simulator vs ravenfield on pc<br />
-animal revolt battle simulator vs kerbal space program on pc<br />
-animal revolt battle simulator vs tabletop simulator on pc<br />
-animal revolt battle simulator vs steel division 2 on pc<br />
-animal revolt battle simulator vs project wunderwaffe on pc<br />
-animal revolt battle simulator vs toribash on pc<br />
-animal revolt battle simulator vs sapiens on pc<br />
-animal revolt battle simulator vs diplomacy is not an option on pc<br />
-animal revolt battle simulator vs blooming business casino on pc<br />
-animal revolt battle simulator vs shieldwall on pc<br />
-animal revolt battle simulator vs regiments on pc<br />
-buy animal revolt battle simulator for pc 2021 <br />
-download and install animal revolt battle simulator for windows 7/8/10 PC <br />
-how to get Animal Revolt Battle Simulator for free PC <br />
-Animal Revolt Battle Simulator PC game size and price <br />
-Animal Revolt Battle Simulator PC game features and enhancements <br />
-Animal Revolt Battle Simulator PC game release date and developer <br />
-Animal Revolt Battle Simulator PC game trailer and screenshots</p>
-<p>The Steam store page of Animal Revolt Battle Simulator is <a href="">https://store.steampowered.com/app/1211630/Animal_Revolt_Battle_Simulator/</a>. Here you can find similar information as on the official website, as well as user reviews, community discussions, and achievements. You can also add the game to your wishlist or follow it to get notified when it is on sale or updated.</p>
-<h3>Step 2: Choose your preferred platform and payment method</h3>
-<p>If you visit the official website of the game, you will see that it is available for Windows, Mac, and Linux platforms. You can choose your preferred platform by clicking on the corresponding icon. You will then be redirected to a secure payment page where you can pay with PayPal or credit card. The game costs $14.99 USD.</p>
-<p>If you visit the Steam store page of the game, you will see that it is only available for Windows platform. You can buy the game by clicking on the "Add to Cart" button. You will then need to log in to your Steam account or create one if you don't have one. You can pay with various methods, such as Steam Wallet, PayPal, credit card, or gift card. The game also costs $14.99 USD on Steam.</p>
-<h3>Step 3: Download and install the game on your PC</h3>
-<p>If you buy the game from the official website, you will receive an email with a download link and a license key. You can click on the link to download the game installer file. Then you can run the file and follow the instructions to install the game on your PC. You will need to enter your license key when prompted.</p>
-<p>If you buy the game from Steam, you will need to download and install the Steam client on your PC if you don't have it already. You can download it from <a href="">https://store.steampowered.com/about/</a>. Then you can launch the Steam client and log in to your account. You will find Animal Revolt Battle Simulator in your library. You can click on it and then click on the "Install" button to download and install the game on your PC.</p>
-<h2>How to Play Animal Revolt Battle Simulator on PC</h2>
-<p>Now that you have downloaded and installed Animal Revolt Battle Simulator on your PC, you are ready to play it! Here are the steps to play the game:</p>
-<h3>Step 1: Launch the game and choose your game mode</h3>
-<p>When you launch Animal Revolt Battle Simulator, you will see a main menu with four options: Play, Workshop, Options, and Quit. You can click on Options to adjust your settings, such as graphics, sound, controls, and language. You can click on Workshop to access user-generated content, such as maps, units, and battles. You can click on Quit to exit the game.</p>
-<p>To start playing, you need to click on Play. You will then see three game modes: Sandbox, Campaign, and First Person Shooter. Sandbox mode is where you can create your own battles between different armies of beasts. Campaign mode is where you can play through a series of missions with different objectives and challenges. First Person Shooter mode is where you can join the battle yourself and use weapons to fight.</p>
-<h3>Step 2: Build your own maps or select from the ready-made ones</h3>
-<p>If you choose Sandbox mode, you will need to build your own maps or select from the ready-made ones. To build your own map, you need to click on Map Editor. You will then see a grid where you can place different types of terrain tiles, such as grass, sand, water, snow, lava, etc. You can also place different types of obstacles, such as rocks, trees, buildings, bridges, etc. You can use the mouse wheel to zoom in and out, and drag the mouse to rotate and move the camera.</p>
-<p>To select a ready-made map, you need to click on Map Selection. You will then see a list of maps that are already available in the game. You can scroll through them and click on one that you like. Some maps have special features or events that make them more interesting or challenging.</p>
-<h3>Step 3: Place your armies of different types of beasts and watch them fight</h3>
-<p>After you have chosen or created your map, you need to place your armies of different types of beasts. You can have up to seven armies, each with a different color and flag. To place an army, you need to click on Army Editor. You will then see a list of categories of creatures, such as Dinosaurs, Dragons, Aquatic, Jungle, etc. You can click on a category to see the available creatures in that category. You can also click on Custom to see the creatures that you or other users have created.</p>
-<p>To place a creature, you need to drag and drop it from the list to the map. You can adjust its position, rotation, and scale by using the mouse and keyboard. You can also duplicate, delete, or lock a creature by using the buttons on the bottom right corner. You can see the cost and stats of each creature on the top right corner. You have a limited budget for each army, so you need to balance quantity and quality.</p>
-<p>When you are done placing your armies, you need to click on Start Battle. You will then see a countdown and then the battle will begin. You can watch the battle from different angles by using the mouse and keyboard. You can also pause, resume, or restart the battle by using the buttons on the top left corner. You can see the health and number of each army on the bottom left corner. The battle will end when only one army remains or when the time runs out.</p>
-<h3>Step 4: Join the battle yourself in the first-person mode and use weapons</h3>
-<p>If you want to join the battle yourself, you need to choose First Person Shooter mode. You will then see a list of maps that are compatible with this mode. You can choose one that you like or create your own in Map Editor. Then you need to place your armies as in Sandbox mode.</p>
-<p>When you start the battle, you will spawn as a human soldier with a gun. You can move around with WASD keys, aim with the mouse, shoot with left click, reload with R key, and switch weapons with Q key. You can also jump with spacebar and crouch with C key. You have a health bar on the top left corner and an ammo counter on the bottom right corner.</p>
-<p>You can fight alongside your army or against them. You can also fight against other human players if you enable multiplayer mode in Options. You can communicate with other players by using voice chat or text chat. You can see the score and time of the battle on the top center of the screen. The battle will end when one army wins or when the time runs out.</p>
-<h2>Features of Animal Revolt Battle Simulator</h2>
-<p>Animal Revolt Battle Simulator is a game that offers many features that make it fun and unique. Here are some of them:</p>
-<h3>Feature 1: A huge variety of creatures from dinosaurs to dragons to sharks</h3>
-<p>One of the main attractions of Animal Revolt Battle Simulator is the huge variety of creatures that you can use in your battles. There are more than 70 creatures in total, divided into different categories based on their origin, habitat, or theme. Some examples are:</p>
-<ul>
-<li>Dinosaurs: Tyrannosaurus Rex, Triceratops, Velociraptor, Spinosaurus, etc.</li>
-<li>Dragons: Fire Dragon, Ice Dragon, Lightning Dragon, Bone Dragon, etc.</li>
-<li>Aquatic: Shark, Whale, Dolphin, Crocodile, etc.</li>
-<li>Jungle: Gorilla, Tiger, Elephant, Snake, etc.</li>
-<li>Farm: Cow, Pig, Sheep, Chicken, etc.</li>
-<li>Military: Tank, Helicopter, Soldier, Missile Launcher, etc.</li>
-<li>Fantasy: Unicorn, Pegasus, Minotaur, Cyclops, etc.</li>
-<li>Alien: Alien Queen, Alien Warrior, Alien Drone, Alien Egg, etc.</li>
-</ul>
-<p>Each creature has its own appearance, behavior, and abilities. Some creatures can fly, swim, breathe fire, shoot lasers, or lay eggs. Some creatures are stronger, faster, or smarter than others. Some creatures can work together, while others are loners. You can mix and match different creatures to create interesting and unpredictable battles.</p>
-<h3>Feature 2: A custom unit creator that allows you to make your own monsters</h3>
-<p>If you are not satisfied with the existing creatures in the game, you can also create your own custom monsters by using the custom unit creator. This feature allows you to combine different body parts and weapons from different creatures to make your own unique creations. You can also customize their color, size, name, and stats.</p>
-<p>To access the custom unit creator, you need to click on Custom in the Army Editor. You will then see a list of body parts and weapons that you can use. You can drag and drop them to the center of the screen to attach them to your monster. You can also use the buttons on the bottom right corner to rotate, scale, or delete them. You can see the cost and stats of your monster on the top right corner.</p>
-<p>When you are done creating your monster, you need to click on Save. You will then be able to use your monster in your battles. You can also share your monster with other players by uploading it to the workshop or downloading other players' monsters from there.</p>
-<h3>Feature 3: A workshop where you can download and upload user-generated content</h3>
-<p>Another feature that makes Animal Revolt Battle Simulator more fun and diverse is the workshop where you can download and upload user-generated content. The workshop is a place where you can find and share maps, units, and battles that other players have created or that you have created yourself.</p>
-<p>To access the workshop, you need to click on Workshop in the main menu. You will then see a list of categories, such as Maps, Units, Battles, etc. You can click on a category to see the available content in that category. You can also use the filters and search bar to find specific content that you are looking for.</p>
-<p>To download content from the workshop, you need to click on it and then click on Subscribe. The content will then be added to your library and you will be able to use it in your game. To upload content to the workshop, you need to click on Upload and then select the content that you want to share. You will then need to fill in some information, such as title, description, tags, etc. The content will then be published and other players will be able to see it and download it.</p>
-<h3>Feature 4: A campaign mode where you can test your strategy and tactics</h3>
-<p>If you are looking for a challenge, you can also try the campaign mode where you can test your strategy and tactics against different enemies and scenarios. The campaign mode consists of 20 missions that vary in difficulty and objectives. Some missions require you to defeat a certain number of enemies or survive for a certain amount of time. Other missions require you to protect a base or capture a flag.</p>
-<p>To play the campaign mode, you need to click on Campaign in the Play menu. You will then see a map with different locations that represent the missions. You can click on a location to start the mission or see its details. You will then see a brief introduction of the mission and its objectives.</p>
-<p>In each mission, you will have a limited budget and a limited number of units that you can use. You will also have a limited time to place your units before the battle starts. You will need to think carefully about which units to use and where to place them in order to achieve your objectives and win the mission.</p>
-<p>When you complete a mission, you will receive a score based on your performance and some coins that you can use to unlock new units or upgrade existing ones. You will also unlock new locations and missions as you progress through the campaign.</p>
-<h2>Tips and Tricks for Animal Revolt Battle Simulator</h2>
-<p>Animal Revolt Battle Simulator is a game that requires some skill and strategy as well as creativity and imagination. Here are some tips and tricks that will help you get better at the game:</p>
-<h3>Tip 1: Learn the strengths and weaknesses of each creature type</h3>
-<p>One of the most important things in Animal Revolt Battle Simulator is to know the strengths and weaknesses of each creature type. Different creatures have different abilities, stats, behaviors, and interactions with other creatures. For example:</p>
-<ul>
-<li>Dinosaurs are generally strong, fast, and aggressive, but they are vulnerable to fire and explosives.</li>
-<li>Dragons can fly, breathe fire, and shoot lasers, but they are expensive and have low health.</li>
-<li>Aquatic animals can swim and bite, but they are slow and weak on land.</li>
-<li>Jungle animals are versatile and adaptable, but they are not very specialized or powerful.</li>
-<li>Farm animals are cheap and numerous, but they are weak and useless in combat.</li>
-<li>Military units are armed and armored, but they are expensive and have limited ammo.</li>
-<li>Fantasy creatures have magical abilities, but they are rare and unpredictable.</li>
-<li>Alien creatures are advanced and intelligent, but they are hostile to everyone and hard to control.</li>
-</ul>
-<p>You need to learn the pros and cons of each creature type and use them accordingly. You also need to consider the terrain, the weather, and the enemy when choosing your units. For example, flying creatures are good on open maps, but not on maps with many obstacles. Fire-breathing creatures are good on cold maps, but not on hot maps. Aquatic creatures are good on water maps, but not on land maps. And so on.</p>
-<h3>Tip 2: Use the terrain and obstacles to your advantage</h3>
-<p>Another important thing in Animal Revolt Battle Simulator is to use the terrain and obstacles to your advantage. Different types of terrain and obstacles can affect the movement, visibility, and combat of your units. For example:</p>
-<ul>
-<li>Grass is good for hiding and camouflage, but it can catch fire easily.</li>
-<li>Sand is good for digging and burrowing, but it can slow down your units.</li>
-<li>Water is good for swimming and cooling down, but it can drown or electrocute your units.</li>
-<li>Snow is good for sliding and throwing snowballs, but it can freeze or melt your units.</li>
-<li>Lava is good for burning and melting your enemies, but it can also burn and melt your units.</li>
-<li>Rocks are good for blocking and smashing your enemies, but they can also block and smash your units.</li>
-<li>Trees are good for climbing and swinging, but they can also fall or catch fire.</li>
-<li>Buildings are good for sheltering and sniping, but they can also collapse or explode.</li>
-<li>Bridges are good for crossing gaps and rivers, but they can also break or collapse.</li>
-</ul>
-<p>You need to use the terrain and obstacles to your advantage by placing your units in strategic positions, avoiding or exploiting hazards, and creating or destroying obstacles. You also need to be aware of the special features or events that some maps have, such as volcanoes, tornadoes, meteors, etc. These can change the terrain and obstacles drastically and affect the outcome of the battle.</p>
-<h3>Tip 3: Experiment with different combinations of creatures and weapons</h3>
-<p>The last tip for Animal Revolt Battle Simulator is to experiment with different combinations of creatures and weapons. There are many possible combinations that you can try in this game, some of them more effective or hilarious than others. For example:</p>
-<ul>
-<li>A T-rex with a rocket launcher on its back</li>
-<li>A dragon with a chainsaw on its tail</li>
-<li>A shark with a laser on its head</li>
-<li>A gorilla with a banana gun in its hand</li>
-<li>A cow with a jetpack on its udder</li>
-<li>A tank with a goat on its turret</li>
-<li>A unicorn with a rainbow cannon on its horn</li>
-<li>An alien queen with a human baby in its mouth</li>
-</ul>
-<p>You can create these combinations by using the custom unit creator or by downloading them from the workshop. You can also find some hidden combinations by using certain codes or cheats. You can experiment with different combinations of creatures and weapons to see how they perform in battle, how they interact with each other, and how funny they look. You might discover some new strategies or secrets that will help you win or make you laugh.</p>
-<h2>Conclusion and FAQs</h2>
-<p>Animal Revolt Battle Simulator is a physics-based sandbox game where you can create funny battles between all sorts of ragdoll creatures. You can build your own maps or select from the ready-made ones, place your armies of different types of beasts game: <a href="">https://discord.gg/animalrevoltbattlesimulator</a></li>
-<li>The official Reddit community of the game: <a href="">https://www.reddit.com/r/AnimalRevoltBattleSim/</a></li>
-</ul>
-<p>These sources will provide you with more details, tips, guides, videos, screenshots, reviews, feedback, and discussions about Animal Revolt Battle Simulator. You can also contact the developers or other players if you have any questions, suggestions, or issues about the game.</p>
-<p>I hope you enjoyed this article and learned something new about Animal Revolt Battle Simulator. If you did, please share it with your friends and let me know what you think in the comments below. And if you haven't already, go ahead and download the game and start creating your own epic battles!</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Download Pear Live and Discover the Best Live Streaming Content for Adults.md
DELETED
@@ -1,131 +0,0 @@
-<br />
-<h1>Download Pear Live: A Fun and Exciting Live Streaming App</h1>
-<p>Are you looking for a new way to have fun and entertain yourself? Do you want to watch live streams of your favorite topics and interact with other people? Do you want to show your talent and personality to the world? If you answered yes to any of these questions, then you should download Pear Live, a fun and exciting live streaming app that will make your day more enjoyable.</p>
-<p>Pear Live is a live streaming app that allows you to watch and create live broadcasts of various categories, such as music, dance, comedy, food, and more. You can also chat with attractive and talented hosts who will keep you company anytime, anywhere. You can also challenge your friends and other streamers in real-time with the Live PK feature. And if you want to enhance your appearance, you can use the Magical Beauty feature that will make you look more beautiful and charming in an instant.</p>
-<h2>download pear live</h2><br /><p><b><b>Download Zip</b> >>>>> <a href="https://jinyurl.com/2uNQYW">https://jinyurl.com/2uNQYW</a></b></p><br /><br />
-<p>In this article, we will show you the features, benefits, tips, and tricks of Pear Live. We will also show you how to download and use this amazing app. So, if you are ready to join the fun, read on!</p>
-<h2>Features of Pear Live</h2>
-<p>Pear Live has many features that make it stand out from other live streaming apps. Here are some of them:</p>
-<h3>Live PK</h3>
-<p>Live PK is a feature that allows you to challenge your friends and other streamers in real-time. You can choose a topic or a game and see who can get more votes from the viewers. The winner will get rewards and bragging rights, while the loser will face a punishment. Are you brave enough to try it?</p>
-<h3>Live Hosts</h3>
-<p>Live Hosts are the streamers who provide 24/7 entertainment for their fans. They are handsome men and beautiful women who have various talents and skills. You can chat with them anytime, anywhere, and send them gifts to show your appreciation. You can also join their fan clubs and get exclusive benefits.</p>
-<h3>Magical Beauty</h3>
-<p>Magical Beauty is a feature that allows you to enhance your appearance with amazing filters and effects. You can choose from different styles and themes that suit your mood and personality. You can also adjust the brightness, contrast, saturation, and other parameters to make yourself look more stunning.</p>
-<h2>How to Download Pear Live</h2>
-<p>Downloading Pear Live is very easy and fast. You just need to follow these steps:</p>
-<h3>Step 1: Go to the official website or app store</h3>
-<p>You can download Pear Live from its official website or from the app store of your device. The official website is <a href="">https://pearlive.com</a> and the app store links are <a href="">https://play.google.com/store/apps/details?id=com.pear.live</a> for Android and <a href="">https://apps.apple.com/us/app/pear-live/id1535077080</a> for iOS.</p>
-<p>download pear live apk<br />
-download pear live app for android<br />
-download pear live streaming app<br />
-download pear live mod apk<br />
-download pear live latest version<br />
-download pear live for pc<br />
-download pear live app for ios<br />
-download pear live apk terbaru 2022<br />
-download pear live show app<br />
-download pear live apk no banned<br />
-download pear live apk free coins<br />
-download pear live app for windows<br />
-download pear live apk versi lama<br />
-download pear live apk unlimited money<br />
-download pear live app for mac<br />
-download pear live apk 2022<br />
-download pear live app for laptop<br />
-download pear live apk mod unlock all<br />
-download pear live apk tanpa password<br />
-download pear live app for iphone<br />
-download pear live apk full version<br />
-download pear live app for ipad<br />
-download pear live apk mod vip<br />
-download pear live apk no watermark<br />
-download pear live app for chromebook<br />
-download pear live apk pro<br />
-download pear live app online<br />
-download pear live apk premium<br />
-download pear live apk no ads<br />
-download pear live app review<br />
-download pear live apk cracked<br />
-download pear live app update<br />
-download pear live apk hack<br />
-download pear live app features<br />
-download pear live apk unlocked<br />
-download pear live app tutorial<br />
-download pear live apk cheat<br />
-download pear live app support<br />
-download pear live apk original<br />
-download pear live app tips and tricks</p>
-<h3>Step 2: Choose your preferred version (APK or iOS)</h3>
-<p>If you are downloading from the official website, you can choose between the APK version or the iOS version. The APK version is for Android devices and the iOS version is for iPhone and iPad devices. The APK version is 64.4 MB and the iOS version is 138.9 MB.</p>
-<h3>Step 3: Install the app and register for a free account</h3>
-<p>After downloading the app, you need to install it on your device. You may need to allow unknown sources or trust the app if prompted. Then, you need to register for a free account using your phone number, email, or social media account. You can also log in with your existing account if you have one.</p>
-<h2>How to Use Pear Live</h2>
-<p>Using Pear Live is very simple and fun. You just need to follow these steps:</p>
-<h3>Step 1: Browse the live streams and find your favorite ones</h3>
-<p>When you open the app, you will see a list of live streams that are currently on air. You can swipe left or right to see more streams or use the search function to find specific topics or hosts. You can also filter the streams by category, such as music, dance, comedy, food, and more.</p>
-<h3>Step 2: Interact with the streamers and other viewers by sending gifts, comments, and likes</h3>
-<p>When you enter a live stream, you can interact with the streamer and other viewers by sending gifts, comments, and likes. Gifts are virtual items that you can buy with coins or diamonds, which are the currencies of the app. You can earn coins by watching ads or completing tasks, or buy them with real money. Diamonds are earned by receiving gifts from your fans or by exchanging coins. Gifts can show your support and appreciation to the streamer and also help them rank higher on the app.</p>
-<p>Comments are messages that you can send to the streamer or other viewers. You can also use emojis, stickers, or voice messages to express yourself better. Comments can help you communicate and socialize with others on the app.</p>
-<p>Likes are hearts that you can tap on the screen to show your love and admiration to the streamer. Likes can also help the streamer gain more popularity and exposure on the app.</p>
-<h3>Step 3: Start your own live stream and show your talent to the world</h3>
-<p>If you want to start your own live stream, you need to tap on the camera icon on the bottom of the screen. You can then choose a title, a category, a cover photo, and a location for your stream. You can also enable or disable the Live PK and Magical Beauty features if you want. Then, you can start streaming and show your talent and personality to the world.</p>
-<p>You can also invite your friends or other streamers to join your stream by tapping on the invite button on the top of the screen. You can also share your stream link to your social media platforms by tapping on the share button on the bottom of the screen.</p>
-<h2>Benefits of Pear Live</h2>
-<p>Pear Live has many benefits that make it worth downloading and using. Here are some of them:</p>
-<h3>Entertainment</h3>
-<p>Pear Live is a great source of entertainment for anyone who loves watching or creating live streams. You can enjoy a variety of content from music, dance, comedy, food, and more. You can also discover new talents and interests that you may not have known before.</p>
-<h3>Socialization</h3>
-<p>Pear Live is also a great platform for socialization for anyone who wants to meet new friends and connect with like-minded people. You can chat with attractive and talented hosts who will keep you company anytime, anywhere. You can also interact with other viewers who share your hobbies and passions.</p>
-<h3>Income</h3>
-<p>Pear Live is also a great opportunity for income for anyone who wants to earn money from their live streams and gifts from their fans. You can monetize your live streams by receiving gifts from your viewers, which can be exchanged for cash or diamonds. You can also join events and competitions that offer cash prizes and rewards.</p> <h2>Tips and Tricks for Pear Live</h2>
-<p>Pear Live is a fun and exciting live streaming app, but it also has some rules and guidelines that you need to follow to have a smooth and safe experience. Here are some tips and tricks that you can use to make the most out of Pear Live:</p>
-<h3>Tip 1: Follow the rules and guidelines of the app to avoid being banned or reported</h3>
-<p>Pear Live is a community-based app that respects the rights and dignity of its users. Therefore, you need to follow the rules and guidelines of the app to avoid being banned or reported by other users or the app administrators. Some of the rules and guidelines are:</p>
-<ul>
-<li>Do not post or stream any illegal, harmful, abusive, hateful, violent, pornographic, or inappropriate content.</li>
-<li>Do not harass, bully, threaten, or impersonate other users or streamers.</li>
-<li>Do not spam, scam, or solicit other users or streamers for money, personal information, or other favors.</li>
-<li>Do not use any third-party software, bots, hacks, or cheats to manipulate the app or gain unfair advantages.</li>
-<li>Do not violate any intellectual property rights, such as copyrights, trademarks, or patents.</li>
-</ul>
-<p>If you violate any of these rules and guidelines, you may face consequences such as warnings, suspensions, bans, or legal actions. So, be respectful and responsible when using Pear Live.</p>
-<h3>Tip 2: Be yourself and have fun while streaming, don't be shy or nervous</h3>
-<p>Pear Live is a live streaming app that allows you to show your talent and personality to the world. Therefore, you should be yourself and have fun while streaming, don't be shy or nervous. Here are some ways to do that:</p>
-<ul>
-<li>Choose a topic or a category that you are passionate about and enjoy doing.</li>
-<li>Prepare some content or activities that you can do during your stream, such as singing, dancing, cooking, playing games, etc.</li>
-<li>Practice your stream before going live, such as checking your camera angle, lighting, sound quality, internet connection, etc.</li>
-<li>Relax and smile while streaming, don't worry too much about making mistakes or being perfect.</li>
-<li>Be confident and positive while streaming, don't let negative comments or feedback affect you.</li>
-</ul>
-<p>If you follow these tips, you will have a more enjoyable and successful streaming experience on Pear Live.</p>
-<h3>Tip 3: Engage with your audience and respond to their feedback, don't ignore them</h3>
-<p>Pear Live is a live streaming app that allows you to interact with your audience and receive their feedback. Therefore, you should engage with your audience and respond to their feedback, don't ignore them. Here are some ways to do that:</p>
-<ul>
-<li>Greet your viewers when they enter your stream and thank them when they leave.</li>
-<li>Ask your viewers questions and opinions about your stream or other topics.</li>
-<li>Answer your viewers' questions and comments politely and honestly.</li>
-<li>Acknowledge your viewers' gifts and likes by saying their names and expressing your gratitude.</li>
-<li>Invite your viewers to join your fan club or follow your social media accounts.</li>
-</ul>
-<p>If you follow these tips, you will have a more loyal and engaged audience on Pear Live.</p>
-<h2>Conclusion</h2>
-<p>Pear Live is a fun and exciting live streaming app that allows you to watch and create live broadcasts of various categories. You can also chat with attractive and talented hosts who will keep you company anytime, anywhere. You can also challenge your friends and other streamers in real-time with the Live PK feature. And if you want to enhance your appearance, you can use the Magical Beauty feature that will make you look more beautiful and charming in an instant.</p>
-<p>Pear Live has many features, benefits, tips, and tricks that make it worth downloading and using. You can download it from its official website or from the app store of your device. You can also use it easily and safely by following the steps and guidelines in this article.</p>
|
116 |
-
<p>So what are you waiting for? Download Pear Live today and join the fun!</p>
|
117 |
-
<h2>Frequently Asked Questions</h2>
|
118 |
-
<h4>Q: Is Pear Live free to use?</h4>
|
119 |
-
<p>A: Yes, Pear Live is free to download and use. You can watch unlimited live streams without paying anything. However, if you want to send gifts to your favorite streamers or buy coins or diamonds for yourself, you need to spend real money.</p>
|
120 |
-
<h4>Q: How can I become a host on Pear Live?</h4>
|
121 |
-
<p>A: If you want to become a host on Pear Live, you need to apply for the host certification on the app. You need to fill out some information, such as your name, age, gender, location, and category. You also need to upload some photos and videos of yourself. Then, you need to wait for the app administrators to review and approve your application. Once you are approved, you can start streaming as a host and earn money from your fans.</p>
|
122 |
-
<h4>Q: How can I join the Live PK feature on Pear Live?</h4>
|
123 |
-
<p>A: If you want to join the Live PK feature on Pear Live, you need to have a certain level of popularity and influence on the app. You can increase your level by streaming more often, receiving more gifts and likes, and gaining more fans and followers. Once you reach a certain level, you can challenge or accept challenges from other streamers who have the same or higher level than you. You can also invite your friends or other streamers to join your PK team.</p>
|
124 |
-
<h4>Q: How can I use the Magical Beauty feature on Pear Live?</h4>
|
125 |
-
<p>A: If you want to use the Magical Beauty feature on Pear Live, you need to enable it before or during your stream. You can find it on the bottom right corner of the screen. You can then choose from different filters and effects that suit your style and mood. You can also adjust the intensity and parameters of each filter and effect to make yourself look more stunning.</p>
|
126 |
-
<h4>Q: How can I contact the customer service of Pear Live?</h4>
|
127 |
-
<p>A: If you have any questions, problems, or feedback about Pear Live, you can contact the customer service of the app by tapping on the settings icon on the top left corner of the screen. You can then choose the feedback option and write your message. You can also attach screenshots or videos if needed. The customer service will reply to you as soon as possible.</p>
|
128 |
-
<h4>Q: How can I delete my account on Pear Live?</h4>
|
129 |
-
<p>A: If you want to delete your account on Pear Live, you need to contact the customer service of the app and request for account deletion. You need to provide your account information, such as your phone number, email, or social media account. You also need to explain why you want to delete your account. The customer service will verify your identity and process your request. Once your account is deleted, you will lose all your data, such as your coins, diamonds, gifts, fans, followers, streams, etc.</p> 197e85843d<br />
|
130 |
-
<br />
|
131 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/232labs/VToonify/vtoonify/model/stylegan/model.py
DELETED
@@ -1,719 +0,0 @@
import math
import random
import functools
import operator

import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function

from model.stylegan.op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d, conv2d_gradfix


class PixelNorm(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)


def make_kernel(k):
    k = torch.tensor(k, dtype=torch.float32)

    if k.ndim == 1:
        k = k[None, :] * k[:, None]

    k /= k.sum()

    return k


class Upsample(nn.Module):
    def __init__(self, kernel, factor=2):
        super().__init__()

        self.factor = factor
        kernel = make_kernel(kernel) * (factor ** 2)
        self.register_buffer("kernel", kernel)

        p = kernel.shape[0] - factor

        pad0 = (p + 1) // 2 + factor - 1
        pad1 = p // 2

        self.pad = (pad0, pad1)

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)

        return out


class Downsample(nn.Module):
    def __init__(self, kernel, factor=2):
        super().__init__()

        self.factor = factor
        kernel = make_kernel(kernel)
        self.register_buffer("kernel", kernel)

        p = kernel.shape[0] - factor

        pad0 = (p + 1) // 2
        pad1 = p // 2

        self.pad = (pad0, pad1)

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)

        return out


class Blur(nn.Module):
    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()

        kernel = make_kernel(kernel)

        if upsample_factor > 1:
            kernel = kernel * (upsample_factor ** 2)

        self.register_buffer("kernel", kernel)

        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)

        return out


class EqualConv2d(nn.Module):
    def __init__(
        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, dilation=1  ## modified
    ):
        super().__init__()

        self.weight = nn.Parameter(
            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        )
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)

        self.stride = stride
        self.padding = padding
        self.dilation = dilation  ## modified

        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channel))

        else:
            self.bias = None

    def forward(self, input):
        out = conv2d_gradfix.conv2d(
            input,
            self.weight * self.scale,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,  ## modified
        )

        return out

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},"
            f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding}, dilation={self.dilation})"  ## modified
        )


class EqualLinear(nn.Module):
    def __init__(
        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
    ):
        super().__init__()

        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))

        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))

        else:
            self.bias = None

        self.activation = activation

        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)

        else:
            out = F.linear(
                input, self.weight * self.scale, bias=self.bias * self.lr_mul
            )

        return out

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})"
        )


class ModulatedConv2d(nn.Module):
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        demodulate=True,
        upsample=False,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        fused=True,
    ):
        super().__init__()

        self.eps = 1e-8
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample

        if upsample:
            factor = 2
            p = (len(blur_kernel) - factor) - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1

            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            self.blur = Blur(blur_kernel, pad=(pad0, pad1))

        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)
        self.padding = kernel_size // 2

        self.weight = nn.Parameter(
            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
        )

        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)

        self.demodulate = demodulate
        self.fused = fused

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, "
            f"upsample={self.upsample}, downsample={self.downsample})"
        )

    def forward(self, input, style, externalweight=None):
        batch, in_channel, height, width = input.shape

        if not self.fused:
            weight = self.scale * self.weight.squeeze(0)
            style = self.modulation(style)

            if self.demodulate:
                w = weight.unsqueeze(0) * style.view(batch, 1, in_channel, 1, 1)
                dcoefs = (w.square().sum((2, 3, 4)) + 1e-8).rsqrt()

            input = input * style.reshape(batch, in_channel, 1, 1)

            if self.upsample:
                weight = weight.transpose(0, 1)
                out = conv2d_gradfix.conv_transpose2d(
                    input, weight, padding=0, stride=2
                )
                out = self.blur(out)

            elif self.downsample:
                input = self.blur(input)
                out = conv2d_gradfix.conv2d(input, weight, padding=0, stride=2)

            else:
                out = conv2d_gradfix.conv2d(input, weight, padding=self.padding)

            if self.demodulate:
                out = out * dcoefs.view(batch, -1, 1, 1)

            return out

        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        if externalweight is None:
            weight = self.scale * self.weight * style
        else:
            weight = self.scale * (self.weight + externalweight) * style

        if self.demodulate:
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)

        weight = weight.view(
            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
        )

        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            weight = weight.view(
                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
            )
            weight = weight.transpose(1, 2).reshape(
                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
            )
            out = conv2d_gradfix.conv_transpose2d(
                input, weight, padding=0, stride=2, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)

        elif self.downsample:
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = conv2d_gradfix.conv2d(
                input, weight, padding=0, stride=2, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        else:
            input = input.view(1, batch * in_channel, height, width)
            out = conv2d_gradfix.conv2d(
                input, weight, padding=self.padding, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        return out


class NoiseInjection(nn.Module):
    def __init__(self):
        super().__init__()

        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, noise=None):
        if noise is None:
            batch, _, height, width = image.shape
            noise = image.new_empty(batch, 1, height, width).normal_()

        return image + self.weight * noise


class ConstantInput(nn.Module):
    def __init__(self, channel, size=4):
        super().__init__()

        self.input = nn.Parameter(torch.randn(1, channel, size, size))

    def forward(self, input):
        batch = input.shape[0]
        out = self.input.repeat(batch, 1, 1, 1)

        return out


class StyledConv(nn.Module):
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )

        self.noise = NoiseInjection()
        # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
        # self.activate = ScaledLeakyReLU(0.2)
        self.activate = FusedLeakyReLU(out_channel)

    def forward(self, input, style, noise=None, externalweight=None):
        out = self.conv(input, style, externalweight)
        out = self.noise(out, noise=noise)
        # out = out + self.bias
        out = self.activate(out)

        return out


class ToRGB(nn.Module):
    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        if upsample:
            self.upsample = Upsample(blur_kernel)

        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))

    def forward(self, input, style, skip=None, externalweight=None):
        out = self.conv(input, style, externalweight)
        out = out + self.bias

        if skip is not None:
            skip = self.upsample(skip)

            out = out + skip

        return out


class Generator(nn.Module):
    def __init__(
        self,
        size,
        style_dim,
        n_mlp,
        channel_multiplier=2,
        blur_kernel=[1, 3, 3, 1],
        lr_mlp=0.01,
    ):
        super().__init__()

        self.size = size

        self.style_dim = style_dim

        layers = [PixelNorm()]

        for i in range(n_mlp):
            layers.append(
                EqualLinear(
                    style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu"
                )
            )

        self.style = nn.Sequential(*layers)

        self.channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        self.input = ConstantInput(self.channels[4])
        self.conv1 = StyledConv(
            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
        )
        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)

        self.log_size = int(math.log(size, 2))
        self.num_layers = (self.log_size - 2) * 2 + 1

        self.convs = nn.ModuleList()
        self.upsamples = nn.ModuleList()
        self.to_rgbs = nn.ModuleList()
        self.noises = nn.Module()

        in_channel = self.channels[4]

        for layer_idx in range(self.num_layers):
            res = (layer_idx + 5) // 2
            shape = [1, 1, 2 ** res, 2 ** res]
            self.noises.register_buffer(f"noise_{layer_idx}", torch.randn(*shape))

        for i in range(3, self.log_size + 1):
            out_channel = self.channels[2 ** i]

            self.convs.append(
                StyledConv(
                    in_channel,
                    out_channel,
                    3,
                    style_dim,
                    upsample=True,
                    blur_kernel=blur_kernel,
                )
            )

            self.convs.append(
                StyledConv(
                    out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
                )
            )

            self.to_rgbs.append(ToRGB(out_channel, style_dim))

            in_channel = out_channel

        self.n_latent = self.log_size * 2 - 2

    def make_noise(self):
        device = self.input.input.device

        noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]

        for i in range(3, self.log_size + 1):
            for _ in range(2):
                noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))

        return noises

    def mean_latent(self, n_latent):
        latent_in = torch.randn(
            n_latent, self.style_dim, device=self.input.input.device
        )
        latent = self.style(latent_in).mean(0, keepdim=True)

        return latent

    def get_latent(self, input):
        return self.style(input)

    def forward(
        self,
        styles,
        return_latents=False,
        inject_index=None,
        truncation=1,
        truncation_latent=None,
        input_is_latent=False,
        noise=None,
        randomize_noise=True,
        z_plus_latent=False,
        return_feature_ind=999,
    ):
        if not input_is_latent:
            if not z_plus_latent:
                styles = [self.style(s) for s in styles]
            else:
                styles_ = []
                for s in styles:
                    style_ = []
                    for i in range(s.shape[1]):
                        style_.append(self.style(s[:, i]).unsqueeze(1))
                    styles_.append(torch.cat(style_, dim=1))
                styles = styles_

        if noise is None:
            if randomize_noise:
                noise = [None] * self.num_layers
            else:
                noise = [
                    getattr(self.noises, f"noise_{i}") for i in range(self.num_layers)
                ]

        if truncation < 1:
            style_t = []

            for style in styles:
                style_t.append(
                    truncation_latent + truncation * (style - truncation_latent)
                )

            styles = style_t

        if len(styles) < 2:
            inject_index = self.n_latent

            if styles[0].ndim < 3:
                latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)

            else:
                latent = styles[0]

        else:
            if inject_index is None:
                inject_index = random.randint(1, self.n_latent - 1)

            if styles[0].ndim < 3:
                latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
                latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)

                latent = torch.cat([latent, latent2], 1)
            else:
                latent = torch.cat([styles[0][:, 0:inject_index], styles[1][:, inject_index:]], 1)

        out = self.input(latent)
        out = self.conv1(out, latent[:, 0], noise=noise[0])

        skip = self.to_rgb1(out, latent[:, 1])

        i = 1
        for conv1, conv2, noise1, noise2, to_rgb in zip(
            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
        ):
            out = conv1(out, latent[:, i], noise=noise1)
            out = conv2(out, latent[:, i + 1], noise=noise2)
            skip = to_rgb(out, latent[:, i + 2], skip)

            i += 2
            if i > return_feature_ind:
                return out, skip

        image = skip

        if return_latents:
            return image, latent

        else:
            return image, None


class ConvLayer(nn.Sequential):
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
        dilation=1,  ## modified
    ):
        layers = []

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))

            stride = 2
            self.padding = 0

        else:
            stride = 1
            self.padding = kernel_size // 2 + dilation - 1  ## modified

        layers.append(
            EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias and not activate,
                dilation=dilation,  ## modified
            )
        )

        if activate:
            layers.append(FusedLeakyReLU(out_channel, bias=bias))

        super().__init__(*layers)


class ResBlock(nn.Module):
    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        self.conv1 = ConvLayer(in_channel, in_channel, 3)
        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)

        self.skip = ConvLayer(
            in_channel, out_channel, 1, downsample=True, activate=False, bias=False
        )

    def forward(self, input):
        out = self.conv1(input)
        out = self.conv2(out)

        skip = self.skip(input)
        out = (out + skip) / math.sqrt(2)

        return out


class Discriminator(nn.Module):
    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        convs = [ConvLayer(3, channels[size], 1)]

        log_size = int(math.log(size, 2))

        in_channel = channels[size]

        for i in range(log_size, 2, -1):
            out_channel = channels[2 ** (i - 1)]

            convs.append(ResBlock(in_channel, out_channel, blur_kernel))

            in_channel = out_channel

        self.convs = nn.Sequential(*convs)

        self.stddev_group = 4
        self.stddev_feat = 1

        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
        self.final_linear = nn.Sequential(
            EqualLinear(channels[4] * 4 * 4, channels[4], activation="fused_lrelu"),
            EqualLinear(channels[4], 1),
        )

    def forward(self, input):
        out = self.convs(input)

        batch, channel, height, width = out.shape
        group = min(batch, self.stddev_group)
        stddev = out.view(
            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
        )
        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
        stddev = stddev.repeat(group, 1, height, width)
        out = torch.cat([out, stddev], 1)

        out = self.final_conv(out)

        out = out.view(batch, -1)
        out = self.final_linear(out)

        return out
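For reference while reviewing this deletion, here is a minimal sketch of how this Generator was typically driven. The 1024/512/8 sizes and the batch of 4 are illustrative assumptions, not part of the deleted file, and running it requires the custom fused ops imported at the top of the file:

# Usage sketch (assumed sizes; requires the model.stylegan.op extensions).
import torch
from model.stylegan.model import Generator

device = "cuda" if torch.cuda.is_available() else "cpu"
g = Generator(size=1024, style_dim=512, n_mlp=8).to(device).eval()

z = torch.randn(4, 512, device=device)   # batch of latent codes in Z space
trunc = g.mean_latent(4096)              # mean W latent used for truncation
with torch.no_grad():
    image, latent = g([z], truncation=0.7, truncation_latent=trunc, return_latents=True)
print(image.shape)  # (4, 3, 1024, 1024), values roughly in [-1, 1]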
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_mbf.py
DELETED
@@ -1,26 +0,0 @@
from easydict import EasyDict as edict

# make training faster
# our RAM is 256G
# mount -t tmpfs -o size=140G tmpfs /train_tmp

config = edict()
config.loss = "cosface"
config.network = "mbf"
config.resume = False
config.output = None
config.embedding_size = 512
config.sample_rate = 0.1
config.fp16 = True
config.momentum = 0.9
config.weight_decay = 2e-4
config.batch_size = 128
config.lr = 0.1  # batch size is 512

config.rec = "/train_tmp/glint360k"
config.num_classes = 360232
config.num_image = 17091657
config.num_epoch = 20
config.warmup_epoch = -1
config.decay_epoch = [8, 12, 15, 18]
config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
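A hedged sketch of how an EasyDict config module like this is commonly consumed by a training script; the importlib-based loading, the 4-GPU world size, and the linear LR scaling are assumptions to illustrate the "batch size is 512" comment, not code from this repo snapshot:

# Load the config module and apply linear LR scaling (illustrative).
import importlib

cfg = importlib.import_module("configs.glint360k_mbf").config
world_size = 4                              # assumed number of GPUs
total_batch = cfg.batch_size * world_size   # 128 * 4 = 512, the reference batch
lr = cfg.lr * total_batch / 512             # linear scaling against the reference
print(cfg.network, cfg.loss, total_batch, lr)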
spaces/AIConsultant/MusicGen/audiocraft/models/__init__.py
DELETED
@@ -1,18 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Models for EnCodec, AudioGen, MusicGen, as well as the generic LMModel.
"""
# flake8: noqa
from . import builders, loaders
from .encodec import (
    CompressionModel, EncodecModel, DAC,
    HFEncodecModel, HFEncodecCompressionModel)
from .audiogen import AudioGen
from .lm import LMModel
from .multibanddiffusion import MultiBandDiffusion
from .musicgen import MusicGen
from .unet import DiffusionUnet
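Since this `__init__.py` defines the package's public surface, a small usage sketch of one export may help; the model name and generation parameters below are illustrative choices, not mandated by the file:

# Illustrative use of the MusicGen export (model name and params assumed).
from audiocraft.models import MusicGen

model = MusicGen.get_pretrained("facebook/musicgen-small")
model.set_generation_params(duration=8)               # 8 seconds of audio
wav = model.generate(["lo-fi beat with warm piano"])  # (batch, channels, samples) tensor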
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/dataset.py
DELETED
@@ -1,147 +0,0 @@
import collections
import csv
import logging
import os
import random
from glob import glob
from pathlib import Path

import numpy as np
import torch
import torchvision

logger = logging.getLogger(f'main.{__name__}')


class VGGSound(torch.utils.data.Dataset):

    def __init__(self, split, specs_dir, transforms=None, splits_path='./data', meta_path='./data/vggsound.csv'):
        super().__init__()
        self.split = split
        self.specs_dir = specs_dir
        self.transforms = transforms
        self.splits_path = splits_path
        self.meta_path = meta_path

        vggsound_meta = list(csv.reader(open(meta_path), quotechar='"'))
        unique_classes = sorted(list(set(row[2] for row in vggsound_meta)))
        self.label2target = {label: target for target, label in enumerate(unique_classes)}
        self.target2label = {target: label for label, target in self.label2target.items()}
        self.video2target = {row[0]: self.label2target[row[2]] for row in vggsound_meta}

        split_clip_ids_path = os.path.join(splits_path, f'vggsound_{split}.txt')
        if not os.path.exists(split_clip_ids_path):
            self.make_split_files()
        clip_ids_with_timestamp = open(split_clip_ids_path).read().splitlines()
        clip_paths = [os.path.join(specs_dir, v + '_mel.npy') for v in clip_ids_with_timestamp]
        self.dataset = clip_paths
        # self.dataset = clip_paths[:10000]  # overfit one batch

        # 'zyTX_1BXKDE_16000_26000'[:11] -> 'zyTX_1BXKDE'
        vid_classes = [self.video2target[Path(path).stem[:11]] for path in self.dataset]
        class2count = collections.Counter(vid_classes)
        self.class_counts = torch.tensor([class2count[cls] for cls in range(len(class2count))])

        # self.sample_weights = [len(self.dataset) / class2count[self.video2target[Path(path).stem[:11]]] for path in self.dataset]

    def __getitem__(self, idx):
        item = {}

        spec_path = self.dataset[idx]
        # 'zyTX_1BXKDE_16000_26000' -> 'zyTX_1BXKDE'
        video_name = Path(spec_path).stem[:11]

        item['input'] = np.load(spec_path)
        item['input_path'] = spec_path

        # if self.split in ['train', 'valid']:
        item['target'] = self.video2target[video_name]
        item['label'] = self.target2label[item['target']]

        if self.transforms is not None:
            item = self.transforms(item)

        return item

    def __len__(self):
        return len(self.dataset)

    def make_split_files(self):
        random.seed(1337)
        logger.info(f'The split files do not exist @ {self.splits_path}. Calculating the new ones.')
        # The downloaded videos (some went missing on YouTube and no longer available)
        available_vid_paths = sorted(glob(os.path.join(self.specs_dir, '*_mel.npy')))
        logger.info(f'The number of clips available after download: {len(available_vid_paths)}')

        # original (full) train and test sets
        vggsound_meta = list(csv.reader(open(self.meta_path), quotechar='"'))
        train_vids = {row[0] for row in vggsound_meta if row[3] == 'train'}
        test_vids = {row[0] for row in vggsound_meta if row[3] == 'test'}
        logger.info(f'The number of videos in vggsound train set: {len(train_vids)}')
        logger.info(f'The number of videos in vggsound test set: {len(test_vids)}')

        # class counts in test set. We would like to have the same distribution in valid
        unique_classes = sorted(list(set(row[2] for row in vggsound_meta)))
        label2target = {label: target for target, label in enumerate(unique_classes)}
        video2target = {row[0]: label2target[row[2]] for row in vggsound_meta}
        test_vid_classes = [video2target[vid] for vid in test_vids]
        test_target2count = collections.Counter(test_vid_classes)

        # now given the counts from test set, sample the same count for validation and the rest leave in train
        train_vids_wo_valid, valid_vids = set(), set()
        for target, label in enumerate(label2target.keys()):
            class_train_vids = [vid for vid in train_vids if video2target[vid] == target]
            random.shuffle(class_train_vids)
            count = test_target2count[target]
            valid_vids.update(class_train_vids[:count])
            train_vids_wo_valid.update(class_train_vids[count:])

        # make file with a list of available test videos (each video should contain timestamps as well)
        train_i = valid_i = test_i = 0
        with open(os.path.join(self.splits_path, 'vggsound_train.txt'), 'w') as train_file, \
             open(os.path.join(self.splits_path, 'vggsound_valid.txt'), 'w') as valid_file, \
             open(os.path.join(self.splits_path, 'vggsound_test.txt'), 'w') as test_file:
            for path in available_vid_paths:
                path = path.replace('_mel.npy', '')
                vid_name = Path(path).name
                # 'zyTX_1BXKDE_16000_26000'[:11] -> 'zyTX_1BXKDE'
                if vid_name[:11] in train_vids_wo_valid:
                    train_file.write(vid_name + '\n')
                    train_i += 1
                elif vid_name[:11] in valid_vids:
                    valid_file.write(vid_name + '\n')
                    valid_i += 1
                elif vid_name[:11] in test_vids:
                    test_file.write(vid_name + '\n')
                    test_i += 1
                else:
                    raise Exception(f'Clip {vid_name} is neither in train, valid nor test. Strange.')

        logger.info(f'Put {train_i} clips to the train set and saved it to ./data/vggsound_train.txt')
        logger.info(f'Put {valid_i} clips to the valid set and saved it to ./data/vggsound_valid.txt')
        logger.info(f'Put {test_i} clips to the test set and saved it to ./data/vggsound_test.txt')


if __name__ == '__main__':
    from transforms import Crop, StandardNormalizeAudio, ToTensor
    specs_path = '/home/nvme/data/vggsound/features/melspec_10s_22050hz/'

    transforms = torchvision.transforms.transforms.Compose([
        StandardNormalizeAudio(specs_path),
        ToTensor(),
        Crop([80, 848]),
    ])

    datasets = {
        'train': VGGSound('train', specs_path, transforms),
        'valid': VGGSound('valid', specs_path, transforms),
        'test': VGGSound('test', specs_path, transforms),
    }

    print(datasets['train'][0])
    print(datasets['valid'][0])
    print(datasets['test'][0])

    print(datasets['train'].class_counts)
    print(datasets['valid'].class_counts)
    print(datasets['test'].class_counts)
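The commented-out `sample_weights` line in `__init__` hints at class-balanced sampling. A sketch of wiring that idea into a DataLoader, built only from the dataset's own attributes; the spectrogram path is a placeholder:

# Class-balanced loading from the dataset's own counts (path assumed).
from pathlib import Path
from torch.utils.data import DataLoader, WeightedRandomSampler

train_set = VGGSound('train', '/path/to/melspec_10s_22050hz/')
weights = [
    1.0 / train_set.class_counts[train_set.video2target[Path(p).stem[:11]]].item()
    for p in train_set.dataset
]  # inverse class frequency per clip
sampler = WeightedRandomSampler(weights, num_samples=len(weights))
loader = DataLoader(train_set, batch_size=32, sampler=sampler, num_workers=4)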
spaces/ALR03/gradiolangchainChatbotOpenAI/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: GradiolangchainChatbotOpenAI
emoji: 🏢
colorFrom: indigo
colorTo: green
sdk: gradio
sdk_version: 3.39.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192.py
DELETED
@@ -1,172 +0,0 @@
_base_ = [
    '../../../_base_/default_runtime.py',
    '../../../_base_/datasets/deepfashion2.py'
]

default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))

resume = False  # resume training from a checkpoint
load_from = None  # path to pretrained model weights
train_cfg = dict(by_epoch=True, max_epochs=120, val_interval=10)  # training epochs and validation interval
param_scheduler = [
    dict(  # warmup schedule
        type='LinearLR',
        begin=0,
        end=500,
        start_factor=0.001,
        by_epoch=False),
    dict(  # main scheduler
        type='MultiStepLR',
        begin=0,
        end=120,
        milestones=[80, 100],
        gamma=0.1,
        by_epoch=True)
]
optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))  # optimizer and learning rate
auto_scale_lr = dict(base_batch_size=512)  # automatically scale the LR with the batch size

backend_args = dict(backend='local')  # data loading backend; defaults to the local disk
dataset_type = 'DeepFashion2Dataset'  # dataset class name
data_mode = 'topdown'  # algorithm type; determines how annotations are loaded
data_root = 'data/deepfashion2/'  # data root path
# codec: generates training targets and decodes predictions; also stores the
# input image size and output heatmap size
codec = dict(
    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)

train_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(
        type='RandomBBoxTransform',
        shift_prob=0,
        rotate_factor=60,
        scale_factor=(0.75, 1.25)),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
val_pipeline = [  # data transforms at test time
    dict(type='LoadImage', backend_args=backend_args),  # load the image
    dict(type='GetBBoxCenterScale'),  # compute center and scale from the bbox
    dict(type='TopdownAffine', input_size=codec['input_size']),  # apply the affine transform
    dict(type='PackPoseInputs')  # pack the targets
]
train_dataloader = dict(  # training data loader
    batch_size=64,  # batch size
    num_workers=6,  # number of data loading workers
    persistent_workers=True,  # keep workers alive between epochs to avoid respawn overhead
    sampler=dict(type='DefaultSampler', shuffle=True),  # sampling strategy: shuffle the data
    dataset=dict(
        type=dataset_type,  # dataset class name
        data_root=data_root,  # dataset root
        data_mode=data_mode,  # algorithm type
        ann_file='train/deepfashion2_vest.json',  # annotation file path
        data_prefix=dict(img='train/image/'),  # image path
        pipeline=train_pipeline  # data pipeline
    ))
val_dataloader = dict(
    batch_size=32,
    num_workers=6,
    persistent_workers=True,  # keep workers alive between epochs to avoid respawn overhead
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),  # sampling strategy: no shuffling
    dataset=dict(
        type=dataset_type,  # dataset class name
        data_root=data_root,  # dataset root
        data_mode=data_mode,  # algorithm type
        ann_file='validation/deepfashion2_vest.json',  # annotation file path
        data_prefix=dict(img='validation/image/'),  # image path
        test_mode=True,  # test mode switch
        pipeline=val_pipeline  # data pipeline
    ))
test_dataloader = val_dataloader  # validation and test sets are not distinguished by default; redefine if needed

channel_cfg = dict(
    num_output_channels=294,
    dataset_joints=294,
    dataset_channel=[
        [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
            19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
            36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
            53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
            70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
            87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
            103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
            116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
            129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
            142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
            155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
            168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
            181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
            194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
            207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
            220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
            233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
            246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
            259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
            272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
            285, 286, 287, 288, 289, 290, 291, 292, 293
        ],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
        38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
        56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
        74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
        92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
        108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
        122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
        136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
        150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
        164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
        178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
        206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
        220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
        234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
        248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
        262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
        276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
        290, 291, 292, 293
    ])

model = dict(
    type='TopdownPoseEstimator',  # the model type determines the algorithm pipeline
    data_preprocessor=dict(  # normalization and channel-order conversion, done inside the model
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        type='ResNet',
        depth=50,
        init_cfg=dict(
            type='Pretrained',  # pretrained weights; only the backbone is loaded, for transfer learning
            checkpoint='torchvision://resnet50')),
    head=dict(  # model head
        type='HeatmapHead',
        in_channels=2048,
        out_channels=channel_cfg['num_output_channels'],
        # deconv_out_channels=None,
        loss=dict(type='KeypointMSELoss', use_target_weight=True),  # loss function
        decoder=codec),  # decoder: converts heatmaps back to coordinates
    test_cfg=dict(
        flip_test=True,  # enable horizontal-flip test-time augmentation
        flip_mode='heatmap',  # flip the heatmaps
        shift_heatmap=True,  # shift the flipped heatmaps to improve accuracy
    ))

val_evaluator = [
    dict(type='PCKAccuracy', thr=0.2),
    dict(type='AUC'),
    dict(type='EPE'),
]
test_evaluator = val_evaluator  # validation and test sets are not distinguished by default; redefine if needed

visualizer = dict(
    vis_backends=[dict(type='LocalVisBackend'),
                  dict(type='WandbVisBackend')])
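A short, hedged sketch of inspecting a config like this with MMEngine, as is standard in MMPose 1.x; the relative path below is an assumption and requires the `_base_` files to be present:

# Load and inspect the config programmatically (path assumed).
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/'
    'td_hm_res50_4xb64-120e_deepfashion2_vest_256x192.py')
print(cfg.train_cfg.max_epochs, cfg.optim_wrapper.optimizer.lr)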
spaces/AUBADA-ALARABI/poetry20233/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Poetry2023
emoji: 👁
colorFrom: green
colorTo: gray
sdk: gradio
sdk_version: 3.16.0
app_file: app.py
pinned: false
duplicated_from: akhooli/poetry2023
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Abhilashvj/planogram-compliance/utils/segment/general.py
DELETED
@@ -1,190 +0,0 @@
|
|
1 |
-
import cv2
|
2 |
-
import numpy as np
|
3 |
-
import torch
|
4 |
-
import torch.nn.functional as F
|
5 |
-
|
6 |
-
|
7 |
-
def crop_mask(masks, boxes):
|
8 |
-
"""
|
9 |
-
"Crop" predicted masks by zeroing out everything not in the predicted bbox.
|
10 |
-
Vectorized by Chong (thanks Chong).
|
11 |
-
|
12 |
-
Args:
|
13 |
-
- masks should be a size [h, w, n] tensor of masks
|
14 |
-
- boxes should be a size [n, 4] tensor of bbox coords in relative point form
|
15 |
-
"""
|
16 |
-
|
17 |
-
n, h, w = masks.shape
|
18 |
-
x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n)
|
19 |
-
r = torch.arange(w, device=masks.device, dtype=x1.dtype)[
|
20 |
-
None, None, :
|
21 |
-
] # rows shape(1,w,1)
|
22 |
-
c = torch.arange(h, device=masks.device, dtype=x1.dtype)[
|
23 |
-
None, :, None
|
24 |
-
] # cols shape(h,1,1)
|
25 |
-
|
26 |
-
return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
|
27 |
-
|
28 |
-
|
29 |
-
def process_mask_upsample(protos, masks_in, bboxes, shape):
|
30 |
-
"""
|
31 |
-
Crop after upsample.
|
32 |
-
protos: [mask_dim, mask_h, mask_w]
|
33 |
-
masks_in: [n, mask_dim], n is number of masks after nms
|
34 |
-
bboxes: [n, 4], n is number of masks after nms
|
35 |
-
shape: input_image_size, (h, w)
|
36 |
-
|
37 |
-
return: h, w, n
|
38 |
-
"""
|
39 |
-
|
40 |
-
c, mh, mw = protos.shape # CHW
|
41 |
-
    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
    masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0]  # CHW
    masks = crop_mask(masks, bboxes)  # CHW
    return masks.gt_(0.5)


def process_mask(protos, masks_in, bboxes, shape, upsample=False):
    """
    Crop before upsample.
    protos: [mask_dim, mask_h, mask_w]
    masks_in: [n, mask_dim], n is number of masks after nms
    bboxes: [n, 4], n is number of masks after nms
    shape: input_image_size, (h, w)

    return: h, w, n
    """

    c, mh, mw = protos.shape  # CHW
    ih, iw = shape
    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)  # CHW

    downsampled_bboxes = bboxes.clone()
    downsampled_bboxes[:, 0] *= mw / iw
    downsampled_bboxes[:, 2] *= mw / iw
    downsampled_bboxes[:, 3] *= mh / ih
    downsampled_bboxes[:, 1] *= mh / ih

    masks = crop_mask(masks, downsampled_bboxes)  # CHW
    if upsample:
        masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0]  # CHW
    return masks.gt_(0.5)


def process_mask_native(protos, masks_in, bboxes, shape):
    """
    Crop after upsample.
    protos: [mask_dim, mask_h, mask_w]
    masks_in: [n, mask_dim], n is number of masks after nms
    bboxes: [n, 4], n is number of masks after nms
    shape: input_image_size, (h, w)

    return: h, w, n
    """
    c, mh, mw = protos.shape  # CHW
    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
    gain = min(mh / shape[0], mw / shape[1])  # gain = old / new
    pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2  # wh padding
    top, left = int(pad[1]), int(pad[0])  # y, x
    bottom, right = int(mh - pad[1]), int(mw - pad[0])
    masks = masks[:, top:bottom, left:right]

    masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0]  # CHW
    masks = crop_mask(masks, bboxes)  # CHW
    return masks.gt_(0.5)


def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
    """
    im1_shape: model input shape, [h, w]
    im0_shape: origin pic shape, [h, w, 3]
    masks: [h, w, num]
    """
    # Rescale coordinates (xyxy) from im1_shape to im0_shape
    if ratio_pad is None:  # calculate from im0_shape
        gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain = old / new
        pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding
    else:
        pad = ratio_pad[1]
    top, left = int(pad[1]), int(pad[0])  # y, x
    bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])

    if len(masks.shape) < 2:
        raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
    masks = masks[top:bottom, left:right]
    # masks = masks.permute(2, 0, 1).contiguous()
    # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]
    # masks = masks.permute(1, 2, 0).contiguous()
    masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))

    if len(masks.shape) == 2:
        masks = masks[:, :, None]
    return masks


def mask_iou(mask1, mask2, eps=1e-7):
    """
    mask1: [N, n] N is the number of predicted objects
    mask2: [M, n] M is the number of gt objects
    Note: n means image_w x image_h

    return: masks iou, [N, M]
    """
    intersection = torch.matmul(mask1, mask2.t()).clamp(0)
    union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection  # (area1 + area2) - intersection
    return intersection / (union + eps)


def masks_iou(mask1, mask2, eps=1e-7):
    """
    mask1: [N, n] N is the number of predicted objects
    mask2: [N, n] N is the number of gt objects
    Note: n means image_w x image_h

    return: masks iou, (N, )
    """
    intersection = (mask1 * mask2).sum(1).clamp(0)  # (N, )
    union = (mask1.sum(1) + mask2.sum(1))[None] - intersection  # (area1 + area2) - intersection
    return intersection / (union + eps)


def masks2segments(masks, strategy="largest"):
    # Convert masks(n,160,160) into segments(n,xy)
    segments = []
    for x in masks.int().cpu().numpy().astype("uint8"):
        c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
        if c:
            if strategy == "concat":  # concatenate all segments
                c = np.concatenate([x.reshape(-1, 2) for x in c])
            elif strategy == "largest":  # select largest segment
                c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)
        else:
            c = np.zeros((0, 2))  # no segments found
        segments.append(c.astype("float32"))
    return segments
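
For reference, a minimal sketch of how these helpers are typically chained after NMS (a sketch only; `proto`, `det`, and the column layout below are illustrative assumptions, not part of this file):

```py
# Hypothetical post-processing for one image, assuming YOLO-style outputs:
# `proto` is [mask_dim, mh, mw] and `det` holds the NMS-kept rows with
# boxes in columns 0:4 and mask coefficients in columns 6 onwards.
def postprocess_instance_masks(proto, det, input_hw):
    masks = process_mask(proto, det[:, 6:], det[:, :4], input_hw, upsample=True)  # [n, h, w] bool
    segments = masks2segments(masks)  # list of [k, 2] float32 contours
    return masks, segments
```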
spaces/Adesoji1/Panel_PDF_QA/README.md
DELETED
@@ -1,11 +0,0 @@
---
title: Panel PDF QA
emoji: 📈
colorFrom: pink
colorTo: red
sdk: docker
pinned: false
duplicated_from: sophiamyang/Panel_PDF_QA
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/restorabledata-plugin.d.ts
DELETED
@@ -1,10 +0,0 @@
import DataManager from './restorabledata';

export default class DataManagerPlugin extends Phaser.Plugins.BasePlugin {
    add(
        parent: object,
        eventEmitter?: Phaser.Events.EventEmitter,
        config?: object
    ): DataManager;

}
spaces/Amrrs/DragGan-Inversion/stylegan_human/edit/__init__.py
DELETED
@@ -1,3 +0,0 @@
# Copyright (c) SenseTime Research. All rights reserved.

# empty
spaces/Andres99/Tune-A-Video-Training-UI/utils.py
DELETED
@@ -1,65 +0,0 @@
from __future__ import annotations

import pathlib


def find_exp_dirs() -> list[str]:
    repo_dir = pathlib.Path(__file__).parent
    exp_root_dir = repo_dir / 'experiments'
    if not exp_root_dir.exists():
        return []
    exp_dirs = sorted(exp_root_dir.glob('*'))
    exp_dirs = [
        exp_dir for exp_dir in exp_dirs
        if (exp_dir / 'model_index.json').exists()
    ]
    return [path.relative_to(repo_dir).as_posix() for path in exp_dirs]


def save_model_card(
    save_dir: pathlib.Path,
    base_model: str,
    training_prompt: str,
    test_prompt: str = '',
    test_image_dir: str = '',
) -> None:
    image_str = ''
    if test_prompt and test_image_dir:
        image_paths = sorted((save_dir / test_image_dir).glob('*.gif'))
        if image_paths:
            image_path = image_paths[-1]
            rel_path = image_path.relative_to(save_dir)
            image_str = f'''## Samples
Test prompt: {test_prompt}

'''

    model_card = f'''---
license: creativeml-openrail-m
base_model: {base_model}
training_prompt: {training_prompt}
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- text-to-video
- tune-a-video
inference: false
---

# Tune-A-Video - {save_dir.name}

## Model description
- Base model: [{base_model}](https://huggingface.co/{base_model})
- Training prompt: {training_prompt}

{image_str}

## Related papers:
- [Tune-A-Video](https://arxiv.org/abs/2212.11565): One-Shot Tuning of Image Diffusion Models for Text-to-Video Generation
- [Stable-Diffusion](https://arxiv.org/abs/2112.10752): High-Resolution Image Synthesis with Latent Diffusion Models
'''

    with open(save_dir / 'README.md', 'w') as f:
        f.write(model_card)
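
For reference, a minimal sketch of how these helpers might be called (the experiment directory, base model, and prompt below are illustrative):

```py
import pathlib

# List experiment directories that contain a converted pipeline (model_index.json).
print(find_exp_dirs())

# Write a model card for a finished run (hypothetical paths and prompts).
save_model_card(
    save_dir=pathlib.Path('experiments/sample-run'),
    base_model='runwayml/stable-diffusion-v1-5',
    training_prompt='a man is surfing',
)
```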
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/spectrogram_diffusion.md
DELETED
@@ -1,37 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Spectrogram Diffusion

[Spectrogram Diffusion](https://huggingface.co/papers/2206.05408) is by Curtis Hawthorne, Ian Simon, Adam Roberts, Neil Zeghidour, Josh Gardner, Ethan Manilow, and Jesse Engel.

*An ideal music synthesizer should be both interactive and expressive, generating high-fidelity audio in realtime for arbitrary combinations of instruments and notes. Recent neural synthesizers have exhibited a tradeoff between domain-specific models that offer detailed control of only specific instruments, or raw waveform models that can train on any music but with minimal control and slow generation. In this work, we focus on a middle ground of neural synthesizers that can generate audio from MIDI sequences with arbitrary combinations of instruments in realtime. This enables training on a wide range of transcription datasets with a single model, which in turn offers note-level control of composition and instrumentation across a wide range of instruments. We use a simple two-stage process: MIDI to spectrograms with an encoder-decoder Transformer, then spectrograms to audio with a generative adversarial network (GAN) spectrogram inverter. We compare training the decoder as an autoregressive model and as a Denoising Diffusion Probabilistic Model (DDPM) and find that the DDPM approach is superior both qualitatively and as measured by audio reconstruction and Fréchet distance metrics. Given the interactivity and generality of this approach, we find this to be a promising first step towards interactive and expressive neural synthesis for arbitrary combinations of instruments and notes.*

The original codebase can be found at [magenta/music-spectrogram-diffusion](https://github.com/magenta/music-spectrogram-diffusion).



As depicted above, the model takes a MIDI file as input and tokenizes it into a sequence of 5-second intervals. Each tokenized interval, together with positional encodings, is passed through the Note Encoder, and its representation is concatenated with the previous window's generated spectrogram representation obtained via the Context Encoder (for the initial 5-second window this is set to zero). The resulting context is then used as conditioning to sample the denoised spectrogram for the MIDI window; this spectrogram is concatenated to the final output and also serves as the context for the next MIDI window. The process repeats until all the MIDI inputs have been covered. Finally, a MelGAN decoder converts the potentially long spectrogram to audio, which is the final result of this pipeline.
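
For reference, a minimal sketch of running the pipeline end to end (the checkpoint id and MIDI file path are assumptions for illustration, not part of this page):

```py
from diffusers import SpectrogramDiffusionPipeline, MidiProcessor

# Assumed checkpoint id and local MIDI path; substitute your own.
pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion").to("cuda")
processor = MidiProcessor()

# Tokenize the MIDI file into note windows, then synthesize audio.
output = pipe(processor("beethoven_hammerklavier_2.mid"))
audio = output.audios[0]
```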
<Tip>

Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## SpectrogramDiffusionPipeline
[[autodoc]] SpectrogramDiffusionPipeline
  - all
  - __call__

## AudioPipelineOutput
[[autodoc]] pipelines.AudioPipelineOutput
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/latent_upscale.md
DELETED
@@ -1,38 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Latent upscaler

The Stable Diffusion latent upscaler model was created by [Katherine Crowson](https://github.com/crowsonkb/k-diffusion) in collaboration with [Stability AI](https://stability.ai/). It is used to enhance the output image resolution by a factor of 2 (see this demo [notebook](https://colab.research.google.com/drive/1o1qYJcFeywzCIdkfKJy7cTpgZTCM2EI4) for a demonstration of the original implementation).
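
For reference, a minimal sketch of upscaling latents from a base pipeline (the checkpoint ids and prompt are illustrative; `stabilityai/sd-x2-latent-upscaler` is assumed to be the released upscaler checkpoint):

```py
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionLatentUpscalePipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut riding a horse"
generator = torch.manual_seed(33)

# Keep the base output in latent space so the upscaler can consume it directly.
low_res_latents = pipeline(prompt, generator=generator, output_type="latent").images

upscaled_image = upscaler(
    prompt=prompt,
    image=low_res_latents,
    num_inference_steps=20,
    guidance_scale=0,
    generator=generator,
).images[0]
upscaled_image.save("astronaut_1024.png")
```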
<Tip>

Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!

If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations!

</Tip>

## StableDiffusionLatentUpscalePipeline

[[autodoc]] StableDiffusionLatentUpscalePipeline
  - all
  - __call__
  - enable_sequential_cpu_offload
  - enable_attention_slicing
  - disable_attention_slicing
  - enable_xformers_memory_efficient_attention
  - disable_xformers_memory_efficient_attention

## StableDiffusionPipelineOutput

[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/other-formats.md
DELETED
@@ -1,191 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Loading different Stable Diffusion formats

Stable Diffusion models come in different formats depending on the framework they were trained and saved with, and where you download them from. Converting these formats for use in 🤗 Diffusers lets you use all the features supported by the library, such as [using different schedulers](schedulers) for inference, building your custom pipeline, and a variety of techniques and methods for optimizing inference speed.

<Tip>

We recommend the `.safetensors` format because it is much safer than traditional pickled files, which are vulnerable and can be exploited to execute code on your machine. (Learn more in the safetensors loading guide.)

</Tip>

This guide explains how to convert other Stable Diffusion formats to be compatible with 🤗 Diffusers.

## PyTorch .ckpt

The checkpoint, or `.ckpt`, format is commonly used to store models. A `.ckpt` file contains the entire model and is typically several GB in size. While you can load and use a `.ckpt` file directly with the [`~StableDiffusionPipeline.from_ckpt`] method, it is generally better to convert the `.ckpt` file to 🤗 Diffusers so both formats are available.

There are two options for converting a `.ckpt` file: use a Space to convert the checkpoint, or convert the `.ckpt` file with a script.

### Convert with a Space

The easiest and most convenient way to convert a `.ckpt` file is to use the SD to Diffusers Space. You can follow the instructions on the Space to convert the `.ckpt` file.

This approach works well for basic models, but it may struggle with more customized models. You'll know the Space has failed if it returns an empty pull request or an error.
In that case, you can try converting the `.ckpt` file with a script.

### Convert with a script

🤗 Diffusers provides a conversion script for converting `.ckpt` files. This approach is more reliable than the Space above.

Before you begin, make sure you have a local clone of 🤗 Diffusers to run the script, and log in to your Hugging Face account so you can open pull requests and push the converted model to the Hub.

```bash
huggingface-cli login
```

To use the script:

1. Git clone the repository containing the `.ckpt` file you want to convert.

   For this example, let's convert the TemporalNet `.ckpt` file:

```bash
git lfs install
git clone https://huggingface.co/CiaraRowles/TemporalNet
```

2. Open a pull request on the repository where you're converting the checkpoint:

```bash
cd TemporalNet && git fetch origin refs/pr/13:pr/13
git checkout pr/13
```

3. There are several input arguments to configure in the conversion script, but the most important ones are:

   - `checkpoint_path`: the path of the `.ckpt` file to convert.
   - `original_config_file`: a YAML file defining the configuration of the original architecture. If you can't find this file, try searching for the YAML file in the GitHub repository where you found the `.ckpt` file.
   - `dump_path`: the path for the converted model.

   For example, since the TemporalNet model is a Stable Diffusion v1.5 and ControlNet model, you can take the cldm_v15.yaml file from the ControlNet repository.

4. Now you can run the script to convert the `.ckpt` file:

```bash
python ../diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path temporalnetv3.ckpt --original_config_file cldm_v15.yaml --dump_path ./ --controlnet
```

5. Once the conversion is done, upload your converted model and test out the resulting [pull request](https://huggingface.co/CiaraRowles/TemporalNet/discussions/13)!

```bash
git push origin pr/13:refs/pr/13
```

## **Keras .pb or .h5**

🧪 This is an experimental feature. Only Stable Diffusion v1 checkpoints are currently supported by the Convert KerasCV Space.

[KerasCV](https://keras.io/keras_cv/) supports training for [Stable Diffusion](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion) v1 and v2. However, it offers limited support for experimenting with Stable Diffusion models for inference and deployment, whereas 🤗 Diffusers has a more complete set of features for this purpose, such as different [noise schedulers](https://huggingface.co/docs/diffusers/using-diffusers/schedulers), [flash attention](https://huggingface.co/docs/diffusers/optimization/xformers), and [other optimization techniques](https://huggingface.co/docs/diffusers/optimization/fp16).

The [Convert KerasCV](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) Space converts `.pb` or `.h5` files to PyTorch, and then wraps them in a [`StableDiffusionPipeline`] so they're ready for inference. The converted checkpoint is stored in a repository on the Hugging Face Hub.

For this example, let's convert the [`sayakpaul/textual-inversion-kerasio`](https://huggingface.co/sayakpaul/textual-inversion-kerasio/tree/main) checkpoint, which was trained with Textual Inversion. It uses the special token `<my-funny-cat>` to personalize images with cats.

The Convert KerasCV Space allows you to input:

- Your Hugging Face token.
- Paths to download the UNet and text encoder weights from. Depending on how the model was trained, you don't necessarily need to provide the paths to both the UNet and the text encoder. For example, Textual Inversion only requires the embeddings from the text encoder, and a text-to-image model only requires the UNet weights.
- Placeholder token, which is only applicable for Textual Inversion models.
- `output_repo_prefix`, the name of the repository where the converted model is stored.

Click the **Submit** button to automatically convert the KerasCV checkpoint! Once the checkpoint is successfully converted, you'll see a link to the new repository containing the converted checkpoint. Follow the link to the new repository, and you'll see the Convert KerasCV Space generated a model card with an inference widget to try out the converted model.

If you prefer to run inference with code, click on the **Use in Diffusers** button in the upper right corner of the model card to copy and paste the example code:

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
```

Then you can generate an image like:

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
pipeline.to("cuda")

placeholder_token = "<my-funny-cat-token>"
prompt = f"two {placeholder_token} getting married, photorealistic, high quality"
image = pipeline(prompt, num_inference_steps=50).images[0]
```

## **A1111 LoRA files**

[Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (A1111) is a popular web UI for Stable Diffusion that supports model sharing platforms like [Civitai](https://civitai.com/). Models trained with the LoRA technique are especially popular because they're fast to train and have a much smaller file size than a fully finetuned model.

🤗 Diffusers supports loading A1111 LoRA checkpoints with [`~loaders.LoraLoaderMixin.load_lora_weights`]:

```py
from diffusers import DiffusionPipeline, UniPCMultistepScheduler
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "andite/anything-v4.0", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
```

Download a LoRA checkpoint from Civitai; this example uses the [Howls Moving Castle,Interior/Scenery LoRA (Ghibli Stlye)](https://civitai.com/models/14605?modelVersionId=19998) checkpoint, but feel free to try out any LoRA checkpoint!

```bash
!wget https://civitai.com/api/download/models/19998 -O howls_moving_castle.safetensors
```

Load the LoRA checkpoint into the pipeline with the `load_lora_weights` method:

```py
pipeline.load_lora_weights(".", weight_name="howls_moving_castle.safetensors")
```

Now you can use the pipeline to generate images:

```py
prompt = "masterpiece, illustration, ultra-detailed, cityscape, san francisco, golden gate bridge, california, bay area, in the snow, beautiful detailed starry sky"
negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture"

images = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    width=512,
    height=512,
    num_inference_steps=25,
    num_images_per_prompt=4,
    generator=torch.manual_seed(0),
).images
```

Finally, create a helper function to display the images:

```py
from PIL import Image


def image_grid(imgs, rows=2, cols=2):
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


image_grid(images)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/a1111-lora-sf.png" />
</div>
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py
DELETED
@@ -1,164 +0,0 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conversion script for the LDM checkpoints. """

import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
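
For reference, a minimal sketch of driving the same conversion from Python instead of the CLI (the checkpoint and config file names are illustrative):

```py
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

# Hypothetical local files; any v1-style checkpoint and its YAML config work the same way.
pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="v1-5-pruned-emaonly.ckpt",
    original_config_file="v1-inference.yaml",
    extract_ema=True,
)
pipe.save_pretrained("converted-model", safe_serialization=True)
```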
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/unet_2d.py
DELETED
@@ -1,329 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class UNet2DOutput(BaseOutput):
    """
    The output of [`UNet2DModel`].

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            The hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet2DModel(ModelMixin, ConfigMixin):
    r"""
    A 2D UNet model that takes a noisy sample and a timestep and returns a sample shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
            Height and width of input/output sample. Dimensions must be a multiple of `2 ** (len(block_out_channels) -
            1)`.
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use.
        freq_shift (`int`, *optional*, defaults to 0): Frequency shift for Fourier time embedding.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip sin to cos for Fourier time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`):
            Tuple of downsample block types.
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2D"`):
            Block type for middle of UNet, it can be either `UNetMidBlock2D` or `UnCLIPUNetMidBlock2D`.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`):
            Tuple of block output channels.
        layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block.
        mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block.
        downsample_padding (`int`, *optional*, defaults to `1`): The padding for the downsample convolution.
        downsample_type (`str`, *optional*, defaults to `conv`):
            The downsample type for downsampling layers. Choose between "conv" and "resnet"
        upsample_type (`str`, *optional*, defaults to `conv`):
            The upsample type for upsampling layers. Choose between "conv" and "resnet"
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension.
        norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for normalization.
        norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for normalization.
        resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
            for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
        class_embed_type (`str`, *optional*, defaults to `None`):
            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
            `"timestep"`, or `"identity"`.
        num_class_embeds (`int`, *optional*, defaults to `None`):
            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim` when performing class
            conditioning with `class_embed_type` equal to `None`.
    """

    @register_to_config
    def __init__(
        self,
        sample_size: Optional[Union[int, Tuple[int, int]]] = None,
        in_channels: int = 3,
        out_channels: int = 3,
        center_input_sample: bool = False,
        time_embedding_type: str = "positional",
        freq_shift: int = 0,
        flip_sin_to_cos: bool = True,
        down_block_types: Tuple[str] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"),
        up_block_types: Tuple[str] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"),
        block_out_channels: Tuple[int] = (224, 448, 672, 896),
        layers_per_block: int = 2,
        mid_block_scale_factor: float = 1,
        downsample_padding: int = 1,
        downsample_type: str = "conv",
        upsample_type: str = "conv",
        act_fn: str = "silu",
        attention_head_dim: Optional[int] = 8,
        norm_num_groups: int = 32,
        norm_eps: float = 1e-5,
        resnet_time_scale_shift: str = "default",
        add_attention: bool = True,
        class_embed_type: Optional[str] = None,
        num_class_embeds: Optional[int] = None,
    ):
        super().__init__()

        self.sample_size = sample_size
        time_embed_dim = block_out_channels[0] * 4

        # Check inputs
        if len(down_block_types) != len(up_block_types):
            raise ValueError(
                f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
            )

        if len(block_out_channels) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
            )

        # input
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
            timestep_input_dim = block_out_channels[0]

        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)

        # class embedding
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        else:
            self.class_embedding = None

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,
                downsample_padding=downsample_padding,
                resnet_time_scale_shift=resnet_time_scale_shift,
                downsample_type=downsample_type,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            temb_channels=time_embed_dim,
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            output_scale_factor=mid_block_scale_factor,
            resnet_time_scale_shift=resnet_time_scale_shift,
            attention_head_dim=attention_head_dim if attention_head_dim is not None else block_out_channels[-1],
            resnet_groups=norm_num_groups,
            add_attention=add_attention,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=time_embed_dim,
                add_upsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,
                resnet_time_scale_shift=resnet_time_scale_shift,
                upsample_type=upsample_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=num_groups_out, eps=norm_eps)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        class_labels: Optional[torch.Tensor] = None,
        return_dict: bool = True,
    ) -> Union[UNet2DOutput, Tuple]:
        r"""
        The [`UNet2DModel`] forward method.

        Args:
            sample (`torch.FloatTensor`):
                The noisy input tensor with the following shape `(batch, channel, height, width)`.
            timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
            class_labels (`torch.FloatTensor`, *optional*, defaults to `None`):
                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple.

        Returns:
            [`~models.unet_2d.UNet2DOutput`] or `tuple`:
                If `return_dict` is True, an [`~models.unet_2d.UNet2DOutput`] is returned, otherwise a `tuple` is
                returned where the first element is the sample tensor.
        """
        # 0. center input if necessary
        if self.config.center_input_sample:
            sample = 2 * sample - 1.0

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        t_emb = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=self.dtype)
        emb = self.time_embedding(t_emb)

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when doing class conditioning")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = emb + class_emb

        # 2. pre-process
        skip_sample = sample
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "skip_conv"):
                sample, res_samples, skip_sample = downsample_block(
                    hidden_states=sample, temb=emb, skip_sample=skip_sample
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, emb)

        # 5. up
        skip_sample = None
        for upsample_block in self.up_blocks:
            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            if hasattr(upsample_block, "skip_conv"):
                sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample)
            else:
                sample = upsample_block(sample, res_samples, emb)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        if skip_sample is not None:
            sample += skip_sample

        if self.config.time_embedding_type == "fourier":
            timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:]))))
            sample = sample / timesteps

        if not return_dict:
            return (sample,)

        return UNet2DOutput(sample=sample)
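
For reference, a minimal sketch of a forward pass through this model (the sample size and channel counts below are illustrative):

```py
import torch

# 64x64 RGB input; sample_size must be divisible by 2 ** (len(block_out_channels) - 1).
model = UNet2DModel(sample_size=64, in_channels=3, out_channels=3)
noisy_sample = torch.randn(1, 3, 64, 64)
output = model(noisy_sample, timestep=10)  # returns a UNet2DOutput
print(output.sample.shape)  # torch.Size([1, 3, 64, 64])
```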
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/evaluations/inception.py
DELETED
@@ -1,322 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision

try:
    from torchvision.models.utils import load_state_dict_from_url
except ImportError:
    from torch.utils.model_zoo import load_url as load_state_dict_from_url

# Inception weights ported to PyTorch from
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'


class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""

    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,    # First max pooling features
        192: 1,   # Second max pooling features
        768: 2,   # Pre-aux classifier features
        2048: 3   # Final average pooling features
    }

    def __init__(self,
                 output_blocks=[DEFAULT_BLOCK_INDEX],
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3
        Parameters
        ----------
        output_blocks : list of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = _inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Get Inception feature maps
        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)
        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []
        x = inp

        if self.resize_input:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=False)

        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)

        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)

            if idx == self.last_needed_block:
                break

        return outp


def _inception_v3(*args, **kwargs):
    """Wraps `torchvision.models.inception_v3`
    Skips default weight initialization if supported by torchvision version.
    See https://github.com/mseitzer/pytorch-fid/issues/28.
    """
    try:
        version = tuple(map(int, torchvision.__version__.split('.')[:2]))
    except ValueError:
        # Just a caution against weird version strings
        version = (0,)

    if version >= (0, 6):
        kwargs['init_weights'] = False

    return torchvision.models.inception_v3(*args, **kwargs)


def fid_inception_v3():
    """Build pretrained Inception model for FID computation
    The Inception model for FID computation uses a different set of weights
    and has a slightly different structure than torchvision's Inception.
    This method first constructs torchvision's Inception and then patches the
    necessary parts that are different in the FID Inception model.
    """
    inception = _inception_v3(num_classes=1008,
                              aux_logits=False,
                              pretrained=False)
    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
    inception.Mixed_7b = FIDInceptionE_1(1280)
    inception.Mixed_7c = FIDInceptionE_2(2048)

    state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    inception.load_state_dict(state_dict)
    return inception


class FIDInceptionA(torchvision.models.inception.InceptionA):
    """InceptionA block patched for FID computation"""
    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                                   count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionC(torchvision.models.inception.InceptionC):
    """InceptionC block patched for FID computation"""
    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                                   count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionE_1(torchvision.models.inception.InceptionE):
    """First InceptionE block patched for FID computation"""
    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                                   count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionE_2(torchvision.models.inception.InceptionE):
    """Second InceptionE block patched for FID computation"""
    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
|
308 |
-
branch3x3dbl = [
|
309 |
-
self.branch3x3dbl_3a(branch3x3dbl),
|
310 |
-
self.branch3x3dbl_3b(branch3x3dbl),
|
311 |
-
]
|
312 |
-
branch3x3dbl = torch.cat(branch3x3dbl, 1)
|
313 |
-
|
314 |
-
# Patch: The FID Inception model uses max pooling instead of average
|
315 |
-
# pooling. This is likely an error in this specific Inception
|
316 |
-
# implementation, as other Inception models use average pooling here
|
317 |
-
# (which matches the description in the paper).
|
318 |
-
branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
|
319 |
-
branch_pool = self.branch_pool(branch_pool)
|
320 |
-
|
321 |
-
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
|
322 |
-
return torch.cat(outputs, 1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
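Note: the file deleted above is a pytorch-fid style Inception wrapper used for FID evaluation. As a minimal usage sketch (assuming the `InceptionV3` wrapper class defined earlier in the same file, including its `BLOCK_INDEX_BY_DIM` mapping, which is not shown in this hunk), the 2048-dim pool features that FID statistics are computed from would be extracted roughly like this:

    import torch

    # Select the 2048-d "pool3" block that FID uses (assumed mapping
    # from the wrapper class defined earlier in the deleted file).
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    model = InceptionV3([block_idx]).eval()

    images = torch.rand(8, 3, 299, 299)        # B x 3 x H x W, values in (0, 1)
    with torch.no_grad():
        features = model(images)[0]            # requested block output: B x 2048 x 1 x 1
    features = features.squeeze(3).squeeze(2)  # B x 2048, ready for mean/covariance stats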
spaces/Aravindsssss/GradiolangchainChatBoatOpenAI/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: GradiolangchainChatBoatOpenAI
-emoji: 😻
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/certifi/__main__.py
DELETED
@@ -1,12 +0,0 @@
-import argparse
-
-from pip._vendor.certifi import contents, where
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-c", "--contents", action="store_true")
-args = parser.parse_args()
-
-if args.contents:
-    print(contents())
-else:
-    print(where())
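Note: the deleted `__main__.py` above is certifi's small CLI shim. The same information is available programmatically; a sketch, assuming pip's vendored certifi is importable in your environment:

    # Print the path of the bundled CA certificate file, mirroring what
    # the deleted module prints when run with no arguments.
    from pip._vendor.certifi import where

    print(where())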
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/wait.py
DELETED
@@ -1,152 +0,0 @@
-import errno
-import select
-import sys
-from functools import partial
-
-try:
-    from time import monotonic
-except ImportError:
-    from time import time as monotonic
-
-__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
-
-
-class NoWayToWaitForSocketError(Exception):
-    pass
-
-
-# How should we wait on sockets?
-#
-# There are two types of APIs you can use for waiting on sockets: the fancy
-# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
-# select/poll. The stateful APIs are more efficient when you have a lot of
-# sockets to keep track of, because you can set them up once and then use them
-# lots of times. But we only ever want to wait on a single socket at a time
-# and don't want to keep track of state, so the stateless APIs are actually
-# more efficient. So we want to use select() or poll().
-#
-# Now, how do we choose between select() and poll()? On traditional Unixes,
-# select() has a strange calling convention that makes it slow, or fail
-# altogether, for high-numbered file descriptors. The point of poll() is to fix
-# that, so on Unixes, we prefer poll().
-#
-# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
-# for it), but that's OK, because on Windows, select() doesn't have this
-# strange calling convention; plain select() works fine.
-#
-# So: on Windows we use select(), and everywhere else we use poll(). We also
-# fall back to select() in case poll() is somehow broken or missing.
-
-if sys.version_info >= (3, 5):
-    # Modern Python, that retries syscalls by default
-    def _retry_on_intr(fn, timeout):
-        return fn(timeout)
-
-else:
-    # Old and broken Pythons.
-    def _retry_on_intr(fn, timeout):
-        if timeout is None:
-            deadline = float("inf")
-        else:
-            deadline = monotonic() + timeout
-
-        while True:
-            try:
-                return fn(timeout)
-            # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
-            except (OSError, select.error) as e:
-                # 'e.args[0]' incantation works for both OSError and select.error
-                if e.args[0] != errno.EINTR:
-                    raise
-                else:
-                    timeout = deadline - monotonic()
-                    if timeout < 0:
-                        timeout = 0
-                    if timeout == float("inf"):
-                        timeout = None
-                    continue
-
-
-def select_wait_for_socket(sock, read=False, write=False, timeout=None):
-    if not read and not write:
-        raise RuntimeError("must specify at least one of read=True, write=True")
-    rcheck = []
-    wcheck = []
-    if read:
-        rcheck.append(sock)
-    if write:
-        wcheck.append(sock)
-    # When doing a non-blocking connect, most systems signal success by
-    # marking the socket writable. Windows, though, signals success by marking
-    # it as "exceptional". We paper over the difference by checking the write
-    # sockets for both conditions. (The stdlib selectors module does the same
-    # thing.)
-    fn = partial(select.select, rcheck, wcheck, wcheck)
-    rready, wready, xready = _retry_on_intr(fn, timeout)
-    return bool(rready or wready or xready)
-
-
-def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
-    if not read and not write:
-        raise RuntimeError("must specify at least one of read=True, write=True")
-    mask = 0
-    if read:
-        mask |= select.POLLIN
-    if write:
-        mask |= select.POLLOUT
-    poll_obj = select.poll()
-    poll_obj.register(sock, mask)
-
-    # For some reason, poll() takes timeout in milliseconds
-    def do_poll(t):
-        if t is not None:
-            t *= 1000
-        return poll_obj.poll(t)
-
-    return bool(_retry_on_intr(do_poll, timeout))
-
-
-def null_wait_for_socket(*args, **kwargs):
-    raise NoWayToWaitForSocketError("no select-equivalent available")
-
-
-def _have_working_poll():
-    # Apparently some systems have a select.poll that fails as soon as you try
-    # to use it, either due to strange configuration or broken monkeypatching
-    # from libraries like eventlet/greenlet.
-    try:
-        poll_obj = select.poll()
-        _retry_on_intr(poll_obj.poll, 0)
-    except (AttributeError, OSError):
-        return False
-    else:
-        return True
-
-
-def wait_for_socket(*args, **kwargs):
-    # We delay choosing which implementation to use until the first time we're
-    # called. We could do it at import time, but then we might make the wrong
-    # decision if someone goes wild with monkeypatching select.poll after
-    # we're imported.
-    global wait_for_socket
-    if _have_working_poll():
-        wait_for_socket = poll_wait_for_socket
-    elif hasattr(select, "select"):
-        wait_for_socket = select_wait_for_socket
-    else:  # Platform-specific: Appengine.
-        wait_for_socket = null_wait_for_socket
-    return wait_for_socket(*args, **kwargs)
-
-
-def wait_for_read(sock, timeout=None):
-    """Waits for reading to be available on a given socket.
-    Returns True if the socket is readable, or False if the timeout expired.
-    """
-    return wait_for_socket(sock, read=True, timeout=timeout)
-
-
-def wait_for_write(sock, timeout=None):
-    """Waits for writing to be available on a given socket.
-    Returns True if the socket is writable, or False if the timeout expired.
-    """
-    return wait_for_socket(sock, write=True, timeout=timeout)
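Note: the deleted `wait.py` above is urllib3's single-socket waiting helper. A sketch of how its public functions are typically used (assuming `wait_for_read` is imported from this module):

    import socket

    # Wait up to 5 seconds for a listening socket to become readable,
    # i.e. for a pending client connection to be ready to accept.
    server = socket.socket()
    server.bind(("127.0.0.1", 0))
    server.listen(1)

    if wait_for_read(server, timeout=5.0):
        conn, _addr = server.accept()
        conn.close()
    else:
        print("timed out waiting for a connection")
    server.close()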
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/bdist_rpm.py
DELETED
@@ -1,40 +0,0 @@
-import distutils.command.bdist_rpm as orig
-import warnings
-
-from setuptools import SetuptoolsDeprecationWarning
-
-
-class bdist_rpm(orig.bdist_rpm):
-    """
-    Override the default bdist_rpm behavior to do the following:
-
-    1. Run egg_info to ensure the name and version are properly calculated.
-    2. Always run 'install' using --single-version-externally-managed to
-       disable eggs in RPM distributions.
-    """
-
-    def run(self):
-        warnings.warn(
-            "bdist_rpm is deprecated and will be removed in a future "
-            "version. Use bdist_wheel (wheel packages) instead.",
-            SetuptoolsDeprecationWarning,
-        )
-
-        # ensure distro name is up-to-date
-        self.run_command('egg_info')
-
-        orig.bdist_rpm.run(self)
-
-    def _make_spec_file(self):
-        spec = orig.bdist_rpm._make_spec_file(self)
-        spec = [
-            line.replace(
-                "setup.py install ",
-                "setup.py install --single-version-externally-managed "
-            ).replace(
-                "%setup",
-                "%setup -n %{name}-%{unmangled_version}"
-            )
-            for line in spec
-        ]
-        return spec
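Note: the `_make_spec_file` override in the deleted command above rewrites two kinds of lines in the generated RPM spec. A small self-contained illustration of that substitution on hypothetical spec lines (the real lines come from distutils' bdist_rpm):

    # Hypothetical input; mirrors the string rewriting done by _make_spec_file.
    spec = [
        "%setup",
        "python setup.py install --root=%{buildroot}",
    ]
    patched = [
        line.replace(
            "setup.py install ",
            "setup.py install --single-version-externally-managed "
        ).replace("%setup", "%setup -n %{name}-%{unmangled_version}")
        for line in spec
    ]
    # patched[0] == "%setup -n %{name}-%{unmangled_version}"
    # patched[1] gains the --single-version-externally-managed flag
    print(patched)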
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/builtin.py
DELETED
@@ -1,259 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-
-"""
-This file registers pre-defined datasets at hard-coded paths, and their metadata.
-
-We hard-code metadata for common datasets. This will enable:
-1. Consistency check when loading the datasets
-2. Use models on these standard datasets directly and run demos,
-   without having to download the dataset annotations
-
-We hard-code some paths to the dataset that's assumed to
-exist in "./datasets/".
-
-Users SHOULD NOT use this file to create new dataset / metadata for new dataset.
-To add new dataset, refer to the tutorial "docs/DATASETS.md".
-"""
-
-import os
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-
-from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
-from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
-from .cityscapes_panoptic import register_all_cityscapes_panoptic
-from .coco import load_sem_seg, register_coco_instances
-from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
-from .lvis import get_lvis_instances_meta, register_lvis_instances
-from .pascal_voc import register_pascal_voc
-
-# ==== Predefined datasets and splits for COCO ==========
-
-_PREDEFINED_SPLITS_COCO = {}
-_PREDEFINED_SPLITS_COCO["coco"] = {
-    "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"),
-    "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
-    "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"),
-    "coco_2014_valminusminival": (
-        "coco/val2014",
-        "coco/annotations/instances_valminusminival2014.json",
-    ),
-    "coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"),
-    "coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"),
-    "coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"),
-    "coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"),
-    "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"),
-}
-
-_PREDEFINED_SPLITS_COCO["coco_person"] = {
-    "keypoints_coco_2014_train": (
-        "coco/train2014",
-        "coco/annotations/person_keypoints_train2014.json",
-    ),
-    "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"),
-    "keypoints_coco_2014_minival": (
-        "coco/val2014",
-        "coco/annotations/person_keypoints_minival2014.json",
-    ),
-    "keypoints_coco_2014_valminusminival": (
-        "coco/val2014",
-        "coco/annotations/person_keypoints_valminusminival2014.json",
-    ),
-    "keypoints_coco_2017_train": (
-        "coco/train2017",
-        "coco/annotations/person_keypoints_train2017.json",
-    ),
-    "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"),
-    "keypoints_coco_2017_val_100": (
-        "coco/val2017",
-        "coco/annotations/person_keypoints_val2017_100.json",
-    ),
-}
-
-
-_PREDEFINED_SPLITS_COCO_PANOPTIC = {
-    "coco_2017_train_panoptic": (
-        # This is the original panoptic annotation directory
-        "coco/panoptic_train2017",
-        "coco/annotations/panoptic_train2017.json",
-        # This directory contains semantic annotations that are
-        # converted from panoptic annotations.
-        # It is used by PanopticFPN.
-        # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
-        # to create these directories.
-        "coco/panoptic_stuff_train2017",
-    ),
-    "coco_2017_val_panoptic": (
-        "coco/panoptic_val2017",
-        "coco/annotations/panoptic_val2017.json",
-        "coco/panoptic_stuff_val2017",
-    ),
-    "coco_2017_val_100_panoptic": (
-        "coco/panoptic_val2017_100",
-        "coco/annotations/panoptic_val2017_100.json",
-        "coco/panoptic_stuff_val2017_100",
-    ),
-}
-
-
-def register_all_coco(root):
-    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
-        for key, (image_root, json_file) in splits_per_dataset.items():
-            # Assume pre-defined datasets live in `./datasets`.
-            register_coco_instances(
-                key,
-                _get_builtin_metadata(dataset_name),
-                os.path.join(root, json_file) if "://" not in json_file else json_file,
-                os.path.join(root, image_root),
-            )
-
-    for (
-        prefix,
-        (panoptic_root, panoptic_json, semantic_root),
-    ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
-        prefix_instances = prefix[: -len("_panoptic")]
-        instances_meta = MetadataCatalog.get(prefix_instances)
-        image_root, instances_json = instances_meta.image_root, instances_meta.json_file
-        # The "separated" version of COCO panoptic segmentation dataset,
-        # e.g. used by Panoptic FPN
-        register_coco_panoptic_separated(
-            prefix,
-            _get_builtin_metadata("coco_panoptic_separated"),
-            image_root,
-            os.path.join(root, panoptic_root),
-            os.path.join(root, panoptic_json),
-            os.path.join(root, semantic_root),
-            instances_json,
-        )
-        # The "standard" version of COCO panoptic segmentation dataset,
-        # e.g. used by Panoptic-DeepLab
-        register_coco_panoptic(
-            prefix,
-            _get_builtin_metadata("coco_panoptic_standard"),
-            image_root,
-            os.path.join(root, panoptic_root),
-            os.path.join(root, panoptic_json),
-            instances_json,
-        )
-
-
-# ==== Predefined datasets and splits for LVIS ==========
-
-
-_PREDEFINED_SPLITS_LVIS = {
-    "lvis_v1": {
-        "lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"),
-        "lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"),
-        "lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"),
-        "lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"),
-    },
-    "lvis_v0.5": {
-        "lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"),
-        "lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"),
-        "lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"),
-        "lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"),
-    },
-    "lvis_v0.5_cocofied": {
-        "lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"),
-        "lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"),
-    },
-}
-
-
-def register_all_lvis(root):
-    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
-        for key, (image_root, json_file) in splits_per_dataset.items():
-            register_lvis_instances(
-                key,
-                get_lvis_instances_meta(dataset_name),
-                os.path.join(root, json_file) if "://" not in json_file else json_file,
-                os.path.join(root, image_root),
-            )
-
-
-# ==== Predefined splits for raw cityscapes images ===========
-_RAW_CITYSCAPES_SPLITS = {
-    "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"),
-    "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"),
-    "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"),
-}
-
-
-def register_all_cityscapes(root):
-    for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
-        meta = _get_builtin_metadata("cityscapes")
-        image_dir = os.path.join(root, image_dir)
-        gt_dir = os.path.join(root, gt_dir)
-
-        inst_key = key.format(task="instance_seg")
-        DatasetCatalog.register(
-            inst_key,
-            lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
-                x, y, from_json=True, to_polygons=True
-            ),
-        )
-        MetadataCatalog.get(inst_key).set(
-            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
-        )
-
-        sem_key = key.format(task="sem_seg")
-        DatasetCatalog.register(
-            sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
-        )
-        MetadataCatalog.get(sem_key).set(
-            image_dir=image_dir,
-            gt_dir=gt_dir,
-            evaluator_type="cityscapes_sem_seg",
-            ignore_label=255,
-            **meta,
-        )
-
-
-# ==== Predefined splits for PASCAL VOC ===========
-def register_all_pascal_voc(root):
-    SPLITS = [
-        ("voc_2007_trainval", "VOC2007", "trainval"),
-        ("voc_2007_train", "VOC2007", "train"),
-        ("voc_2007_val", "VOC2007", "val"),
-        ("voc_2007_test", "VOC2007", "test"),
-        ("voc_2012_trainval", "VOC2012", "trainval"),
-        ("voc_2012_train", "VOC2012", "train"),
-        ("voc_2012_val", "VOC2012", "val"),
-    ]
-    for name, dirname, split in SPLITS:
-        year = 2007 if "2007" in name else 2012
-        register_pascal_voc(name, os.path.join(root, dirname), split, year)
-        MetadataCatalog.get(name).evaluator_type = "pascal_voc"
-
-
-def register_all_ade20k(root):
-    root = os.path.join(root, "ADEChallengeData2016")
-    for name, dirname in [("train", "training"), ("val", "validation")]:
-        image_dir = os.path.join(root, "images", dirname)
-        gt_dir = os.path.join(root, "annotations_detectron2", dirname)
-        name = f"ade20k_sem_seg_{name}"
-        DatasetCatalog.register(
-            name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
-        )
-        MetadataCatalog.get(name).set(
-            stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:],
-            image_root=image_dir,
-            sem_seg_root=gt_dir,
-            evaluator_type="sem_seg",
-            ignore_label=255,
-        )
-
-
-# True for open source;
-# Internally at fb, we register them elsewhere
-if __name__.endswith(".builtin"):
-    # Assume pre-defined datasets live in `./datasets`.
-    _root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
-    register_all_coco(_root)
-    register_all_lvis(_root)
-    register_all_cityscapes(_root)
-    register_all_cityscapes_panoptic(_root)
-    register_all_pascal_voc(_root)
-    register_all_ade20k(_root)
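Note: the deleted `builtin.py` above registers detectron2's standard datasets as a side effect of import. A sketch of how that registration is typically consumed (assuming detectron2 is installed and the expected `./datasets/coco` layout exists on disk):

    from detectron2.data import DatasetCatalog, MetadataCatalog

    # Importing detectron2.data triggers builtin.py, which registers names
    # like "coco_2017_val" against paths under $DETECTRON2_DATASETS
    # (default: "./datasets").
    dataset_dicts = DatasetCatalog.get("coco_2017_val")  # loads the annotations
    metadata = MetadataCatalog.get("coco_2017_val")
    print(len(dataset_dicts), metadata.evaluator_type)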
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_engine.py
DELETED
@@ -1,186 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import json
-import math
-import os
-import tempfile
-import time
-import unittest
-from unittest import mock
-import torch
-from fvcore.common.checkpoint import Checkpointer
-from torch import nn
-
-from detectron2 import model_zoo
-from detectron2.config import configurable, get_cfg
-from detectron2.engine import DefaultTrainer, SimpleTrainer, default_setup, hooks
-from detectron2.modeling.meta_arch import META_ARCH_REGISTRY
-from detectron2.utils.events import CommonMetricPrinter, JSONWriter
-
-
-@META_ARCH_REGISTRY.register()
-class _SimpleModel(nn.Module):
-    @configurable
-    def __init__(self, sleep_sec=0):
-        super().__init__()
-        self.mod = nn.Linear(10, 20)
-        self.sleep_sec = sleep_sec
-
-    @classmethod
-    def from_config(cls, cfg):
-        return {}
-
-    def forward(self, x):
-        if self.sleep_sec > 0:
-            time.sleep(self.sleep_sec)
-        return {"loss": x.sum() + sum([x.mean() for x in self.parameters()])}
-
-
-class TestTrainer(unittest.TestCase):
-    def _data_loader(self, device):
-        device = torch.device(device)
-        while True:
-            yield torch.rand(3, 3).to(device)
-
-    def test_simple_trainer(self, device="cpu"):
-        model = _SimpleModel().to(device=device)
-        trainer = SimpleTrainer(
-            model, self._data_loader(device), torch.optim.SGD(model.parameters(), 0.1)
-        )
-        trainer.train(0, 10)
-
-    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
-    def test_simple_trainer_cuda(self):
-        self.test_simple_trainer(device="cuda")
-
-    def test_writer_hooks(self):
-        model = _SimpleModel(sleep_sec=0.1)
-        trainer = SimpleTrainer(
-            model, self._data_loader("cpu"), torch.optim.SGD(model.parameters(), 0.1)
-        )
-
-        max_iter = 50
-
-        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
-            json_file = os.path.join(d, "metrics.json")
-            writers = [CommonMetricPrinter(max_iter), JSONWriter(json_file)]
-
-            trainer.register_hooks(
-                [hooks.EvalHook(0, lambda: {"metric": 100}), hooks.PeriodicWriter(writers)]
-            )
-            with self.assertLogs(writers[0].logger) as logs:
-                trainer.train(0, max_iter)
-
-            with open(json_file, "r") as f:
-                data = [json.loads(line.strip()) for line in f]
-                self.assertEqual([x["iteration"] for x in data], [19, 39, 49, 50])
-                # the eval metric is in the last line with iter 50
-                self.assertIn("metric", data[-1], "Eval metric must be in last line of JSON!")
-
-            # test logged messages from CommonMetricPrinter
-            self.assertEqual(len(logs.output), 3)
-            for log, iter in zip(logs.output, [19, 39, 49]):
-                self.assertIn(f"iter: {iter}", log)
-
-            self.assertIn("eta: 0:00:00", logs.output[-1], "Last ETA must be 0!")
-
-    def test_default_trainer(self):
-        # TODO: this test requires manifold access, so changed device to CPU. see: T88318502
-        cfg = get_cfg()
-        cfg.MODEL.DEVICE = "cpu"
-        cfg.MODEL.META_ARCHITECTURE = "_SimpleModel"
-        cfg.DATASETS.TRAIN = ("coco_2017_val_100",)
-        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
-            cfg.OUTPUT_DIR = d
-            trainer = DefaultTrainer(cfg)
-
-            # test property
-            self.assertIs(trainer.model, trainer._trainer.model)
-            trainer.model = _SimpleModel()
-            self.assertIs(trainer.model, trainer._trainer.model)
-
-    def test_checkpoint_resume(self):
-        model = _SimpleModel()
-        dataloader = self._data_loader("cpu")
-        opt = torch.optim.SGD(model.parameters(), 0.1)
-        scheduler = torch.optim.lr_scheduler.StepLR(opt, 3)
-
-        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
-            trainer = SimpleTrainer(model, dataloader, opt)
-            checkpointer = Checkpointer(model, d, opt=opt, trainer=trainer)
-
-            trainer.register_hooks(
-                [
-                    hooks.LRScheduler(scheduler=scheduler),
-                    # checkpoint after scheduler to properly save the state of scheduler
-                    hooks.PeriodicCheckpointer(checkpointer, 10),
-                ]
-            )
-
-            trainer.train(0, 12)
-            self.assertAlmostEqual(opt.param_groups[0]["lr"], 1e-5)
-            self.assertEqual(scheduler.last_epoch, 12)
-            del trainer
-
-            opt = torch.optim.SGD(model.parameters(), 999)  # lr will be loaded
-            trainer = SimpleTrainer(model, dataloader, opt)
-            scheduler = torch.optim.lr_scheduler.StepLR(opt, 3)
-            trainer.register_hooks(
-                [
-                    hooks.LRScheduler(scheduler=scheduler),
-                ]
-            )
-            checkpointer = Checkpointer(model, d, opt=opt, trainer=trainer)
-            checkpointer.resume_or_load("non_exist.pth")
-            self.assertEqual(trainer.iter, 11)  # last finished iter number (0-based in Trainer)
-            # number of times `scheduler.step()` was called (1-based)
-            self.assertEqual(scheduler.last_epoch, 12)
-            self.assertAlmostEqual(opt.param_groups[0]["lr"], 1e-5)
-
-    def test_eval_hook(self):
-        model = _SimpleModel()
-        dataloader = self._data_loader("cpu")
-        opt = torch.optim.SGD(model.parameters(), 0.1)
-
-        for total_iter, period, eval_count in [(30, 15, 2), (31, 15, 3), (20, 0, 1)]:
-            test_func = mock.Mock(return_value={"metric": 3.0})
-            trainer = SimpleTrainer(model, dataloader, opt)
-            trainer.register_hooks([hooks.EvalHook(period, test_func)])
-            trainer.train(0, total_iter)
-            self.assertEqual(test_func.call_count, eval_count)
-
-    def test_best_checkpointer(self):
-        model = _SimpleModel()
-        dataloader = self._data_loader("cpu")
-        opt = torch.optim.SGD(model.parameters(), 0.1)
-        metric_name = "metric"
-        total_iter = 40
-        test_period = 10
-        test_cases = [
-            ("max", iter([0.3, 0.4, 0.35, 0.5]), 3),
-            ("min", iter([1.0, 0.8, 0.9, 0.9]), 2),
-            ("min", iter([math.nan, 0.8, 0.9, 0.9]), 1),
-        ]
-        for mode, metrics, call_count in test_cases:
-            trainer = SimpleTrainer(model, dataloader, opt)
-            with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
-                checkpointer = Checkpointer(model, d, opt=opt, trainer=trainer)
-                trainer.register_hooks(
-                    [
-                        hooks.EvalHook(test_period, lambda: {metric_name: next(metrics)}),
-                        hooks.BestCheckpointer(test_period, checkpointer, metric_name, mode=mode),
-                    ]
-                )
-                with mock.patch.object(checkpointer, "save") as mock_save_method:
-                    trainer.train(0, total_iter)
-                self.assertEqual(mock_save_method.call_count, call_count)
-
-    def test_setup_config(self):
-        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
-            cfg = get_cfg()
-            cfg.OUTPUT_DIR = os.path.join(d, "yacs")
-            default_setup(cfg, {})
-
-            cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py")
-            cfg.train.output_dir = os.path.join(d, "omegaconf")
-            default_setup(cfg, {})
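Note: the deleted test file above exercises detectron2's training loop. The core pattern it tests looks roughly like this (a sketch; `_SimpleModel` refers to the toy model defined in the deleted tests, and the loss-dict contract is detectron2's `SimpleTrainer` API):

    import torch
    from detectron2.engine import SimpleTrainer

    model = _SimpleModel()  # toy model from the tests: forward returns {"loss": ...}
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    def data_loader():
        # SimpleTrainer consumes an (effectively infinite) iterator of batches.
        while True:
            yield torch.rand(3, 3)

    trainer = SimpleTrainer(model, data_loader(), optimizer)
    trainer.train(0, 10)  # run iterations 0 through 9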
spaces/Benson/text-generation/Examples/Baixar Pou Reggae Apk.md
DELETED
@@ -1,108 +0,0 @@
-<br />
-<h1>Baixar Pou Reggae APK: How to Download and Install the Fun Virtual Pet Game</h1>
-<p>Do you like virtual pet games? Do you like reggae music and culture? If you answered yes to both questions, then you may want to try <strong>Pou Reggae APK</strong>, a modified version of the popular game <strong>Pou</strong> that features reggae music and themes. In this article, we will tell you what Pou Reggae APK is, how to download and install it on your Android device, and how to play and enjoy it. </p>
-<h2>What is Pou Reggae APK? </h2>
-<p>Pou Reggae APK is a modified version of Pou, a virtual pet game, that originated in Jamaica in the late 1960s. It emerged from a fusion of US R&B, Jamaican mento and calypso from Trinidad and Tobago, and was influenced by ska and rocksteady. </p>
-<h2>baixar pou reggae apk</h2><br /><p><b><b>Download Zip</b> ✑ ✑ ✑ <a href="https://bltlly.com/2v6M5i">https://bltlly.com/2v6M5i</a></b></p><br /><br />
-<h3>A brief introduction to Pou, the popular virtual pet game</h3>
-<p>Pou is a virtual pet game that was developed and published by Lebanese designer Paul Salameh (listed as Zakeh on the Google Play Store) in 2012. It is similar to Tamagotchi, a fad game that required taking care of a simulated creature. In Pou, you have to take care of an alien pet called Pou, which has a brown triangular shape with eyes, a mouth and limbs. You have to feed it, clean it, play with it, and watch it grow as it levels up and unlocks different wallpapers and outfits. You can also customize Pou's appearance, try out new outfits, hats and glasses, experiment with potions in the lab, play games in the game room, visit and play with your friends' Pous, and talk to Pou and listen. </p>
-<h3>The features and benefits of Pou Reggae APK, a modified version of Pou with reggae music and themes</h3>
-<p>Pou Reggae APK is a modified version of Pou that adds reggae music and themes to the game. It has all the features of the original game, plus some extra ones that make it more fun and unique. Some of the features and benefits of Pou Reggae APK are:</p>
-<ul>
-<li>It has reggae background music that gives you a relaxing and cheerful atmosphere. </li>
-<li>It has reggae-themed wallpapers that you can use to decorate your rooms. </li>
-<li>It has reggae-themed outfits that you can dress your Pou in. </li>
-<li>It has reggae mini-games that you can play with your Pou.</li>
-<li>It has reggae icons that make the game more colorful. </li>
-</ul>
-<p>Pou Reggae APK <h2>How to download and install Pou Reggae APK on Android</h2>
-<p>If you want to try Pou Reggae APK on your Android device, you will need to download and install it from a third-party source, since it is not available on the official Google Play Store. Here are the steps to do so:</p>
-<h3>The steps to download and install Pou Reggae APK from a reputable source</h3>
-<ol>
-<li>Find a reputable website that offers Pou Reggae APK for download. You can use a search engine or a trusted APK downloader site to find one. Some examples of websites that offer Pou Reggae APK are . Make sure to check the reviews, ratings and comments of other users before downloading anything. </li>
-<li>Download the Pou Reggae APK file to your device. You can use your browser or a download manager app to do so. The file size is about 24 MB, so make sure you have enough space and a stable Internet connection. </li>
-<li>Locate the downloaded Pou Reggae APK file on your device. You can use a file manager app or your device's default file explorer to find it. It is usually stored in the Downloads folder or the folder you specified when downloading. </li>
-<li>Tap on the Pou Reggae APK file to start the installation process. You may see a warning message that says "For your security, your phone is not allowed to install unknown apps from this source". This is because you are installing an APK file from an unknown source, which may pose some risks to your device and data. </li>
-</ol>
-<h3>The precautions and permissions needed to install APK files on Android</h3>
-
-<ul>
-<li>Enable the option to install apps from unknown sources. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown sources and turn it on. Alternatively, you can also go to Settings > Apps & notifications > Special app access > Install unknown apps and select the app you want to allow to install APK files. </li>
-<li>Check the permissions the app requests before installing it. Some apps may request access to your contacts, camera, location, storage, etc. Make sure you are comfortable granting these permissions and that they are relevant to the app's functionality. To check the permissions, go to Settings > Apps & notifications > App info and select the app you want to check. </li>
-<li>Scan the APK file for viruses and malware before installing it. Some APK files may contain harmful code that can damage your device or steal your data. To scan the APK file, you can use an antivirus app or an online virus scanning tool. </li>
-<li>Back up your data before installing any APK file. In case something goes wrong during the installation process or after using the app, you can restore your data from a backup. To back up your data, you can use a cloud service, an external storage device or a backup app. </li>
-</ul>
-<h3>The advantages and disadvantages of installing APK files on Android</h3>
-<p>Installing APK files on Android has some advantages and disadvantages that you should keep in mind. Here are some of them:</p>
-<table>
-<tr>
-<th>Advantages</th>
-<th>Disadvantages</th>
-</tr>
-<tr>
-<td>You can access apps that are not available on the Google Play Store, such as modified versions, beta versions, region-locked apps, etc.</td>
-<td>You may expose your device and data to security risks, such as viruses, malware, spyware, etc.</td>
-</tr>
-<tr>
-<td>You can update apps faster than waiting for official updates from the Google Play Store.</td>
-
-</tr>
-<tr>
-<td>You can customize your apps according to your preferences, such as changing themes, icons, sounds, etc.</td>
-<td>You may experience compatibility issues with your device or other apps, such as crashes, errors, glitches, etc.</td>
-</tr>
-<tr>
-<td>You can enjoy unlimited coins, gems, lives, etc. in some games that offer in-app purchases. </td>
-<td>You may violate the terms and conditions of some apps or games that prohibit modification or piracy. </td>
-</tr>
-</table> <h2>How to play and enjoy Pou Reggae APK</h2>
-<p>Now that you have downloaded and installed Pou Reggae APK on your Android device, you are ready to play and enjoy it. Here are some tips and tricks to help you get started:</p>
-<h3>The basic gameplay and controls of Pou Reggae APK</h3>
-<p>The basic gameplay and controls of Pou Reggae APK are the same as the original Pou game. You have to take care of your Pou by feeding it, cleaning it, playing with it, and putting it to sleep. You can also customize your Pou's appearance and rooms, play mini-games, visit your friends' Pous, and talk to your Pou.</p>
-<p></p>
-<p>To feed your Pou, you have to drag food from the fridge to its mouth. You can also buy more food from the shop using your coins. To clean your Pou, you have to drag the soap to its body and then rinse it with water. To play with your Pou, you have to tap the ball icon and then choose a toy or a game. To put your Pou to sleep, you have to tap the lamp icon and then turn off the lights. </p>
-
-<h3>Tips and tricks to keep Pou happy and healthy</h3>
-<p>To keep your Pou happy and healthy, you have to pay attention to its needs and moods. You can see its status by tapping the stats icon in the top right corner of the screen. You can see its hunger level, health level, energy level and fun level. You have to keep these levels high by feeding it, cleaning it, playing with it, and putting it to sleep regularly. </p>
-<p>Some tips and tricks to keep Pou happy and healthy are:</p>
-<ul>
-<li>Feed your Pou a balanced diet of fruits, vegetables, meat, dairy products, etc. Avoid feeding it too much junk food or spicy food, as it can get sick or unhappy. </li>
-<li>Clean your Pou regularly to keep it hygienic and prevent infections. You can also use potions from the lab to heal your Pou if it gets sick or injured. </li>
-<li>Play with your Pou often to keep it entertained and active. You can also earn coins by playing mini-games or completing achievements. </li>
-<li>Put your Pou to sleep when it gets tired or bored. You can also use potions from the lab to make your Pou sleep faster or longer. </li>
-<li>Customize your Pou's appearance and rooms according to its preferences and personality. You can also use reggae items from the shop to make your Pou more stylish and cool. </li>
-</ul>
-<h3>The mini-games and customization options available in Pou Reggae APK</h3>
-<p>Pou Reggae APK has many mini-games and customization options that you can enjoy. Some of them are:</p>
-<ul>
-<li>Reggae Music: A mini-game where you have to tap the notes that match the reggae music playing in the background. </li>
-<li>Reggae Quiz: A mini-game where you have to answer trivia questions about reggae music and culture. </li>
-<li>Reggae Match: A mini-game where you have to match pairs of reggae-themed cards. </li>
-<li>Reggae Wallpaper: A customization option where you can choose from various reggae-themed wallpapers for your rooms. </li>
-
-<li>Reggae Hat: A customization option where you can choose from various reggae hats for your Pou.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Pou Reggae APK is a fun and unique virtual pet game that combines the original Pou game with reggae music and themes. It has unlimited coins, reggae music, reggae wallpapers, reggae outfits, reggae mini-games, reggae icons, etc. It is easy to download and install on Android devices using APK files from reputable sources. It is also easy to play and enjoy using simple controls and tips. If you are looking for a new and fun way to take care of a virtual pet, you should definitely give Pou Reggae APK a try. You won't regret it! </p>
-<h2>FAQ</h2>
-<p>Here are some frequently asked questions about Pou Reggae APK that you may find useful:</p>
-<h3> What is the difference between Pou Reggae APK and Pou? </h3>
-<p>Pou Reggae APK is a modified version of Pou that adds reggae music and themes to the game. It has all the features of the original game, plus some extra ones that make it more fun and unique. Pou Reggae APK has unlimited coins, reggae music, reggae wallpapers, reggae outfits, reggae mini-games, reggae icons, etc.</p>
-<h3> Is Pou Reggae APK safe and legal to use? </h3>
-<p>Pou Reggae APK is safe and legal to use as long as you download it from a reputable source and scan it for viruses and malware before installing it. However, you should be aware that installing APK files from unknown sources may pose some risks to your device and data, so you should take some precautions and grant permissions carefully before doing so. You should also respect the terms and conditions of the original Pou game and its developer. </p>
-<h3>How can I update Pou Reggae APK to the latest version? </h3>
-
-<h3>Can I play Pou Reggae APK offline or with friends? </h3>
-<p>You can play Pou Reggae APK offline or with friends depending on your preferences and Internet connection. You can play Pou Reggae APK without an Internet connection, but you will not be able to visit your friends' Pous or chat with them. You can play Pou Reggae APK with friends online if you have a stable Internet connection, but you will need to create an account and log in with your email or Facebook.</p>
-<h3>What are some alternatives to Pou Reggae APK? </h3>
-<p>If you are looking for some alternatives to Pou Reggae APK, you can try some other virtual pet games that are similar to or different from Pou. Some examples are:</p>
-<ul>
-<li>My Talking Tom: A virtual pet game where you have to take care of a talking cat named Tom.</li>
-<li>Neopets: A virtual pet game where you have to create and take care of your own neopets. </li>
-<li>Nintendogs: A virtual pet game where you have to raise and train your own dogs. </li>
-<li>Tamagotchi: A virtual pet game where you have to take care of a simulated creature. </li>
-<li>Pet Society: A virtual pet game where you have to socialize and interact with other pets. </li>
-</ul></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Bajar Deh Cancin Descargar Mp3 Pagalworld Ringtone.md
DELETED
@@ -1,77 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Bajar la canción Descargar Mp3 Pagalworld Ringtone</h1>
|
3 |
-
<p>Si estás buscando un tono pegadizo y optimista para tu teléfono, es posible que quieras echar un vistazo a Go Down Deh, una canción de éxito de los artistas jamaicanos Spice, Sean Paul y Shaggy. En este artículo, te contaremos todo lo que necesitas saber sobre esta canción, por qué es tan popular, cómo descargarla como tono de llamada y cuáles son algunas alternativas a ella. ¡Vamos a empezar! </p>
|
4 |
-
<h2>bajar deh canción descargar mp3 pagalworld ringtone</h2><br /><p><b><b>Download</b> ✔ <a href="https://bltlly.com/2v6KSn">https://bltlly.com/2v6KSn</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Go Down Deh? </h2>
|
6 |
-
<p>Go Down Deh es una canción de dancehall que fue lanzada en mayo de 2021 por Spice, Sean Paul y Shaggy. La canción es del próximo álbum de Spice Ten, que será su álbum debut bajo VP Records. La canción fue producida por el productor ganador del Grammy Costi Ionita, quien ha trabajado con artistas como Pitbull, Shaggy y Enrique Iglesias. La canción cuenta con la firma de Spice voces, Sean Paul del rap versos y Shaggy’s suave entrega. La canción trata de divertirse en la pista de baile y disfrutar del ambiente caribeño. </p>
|
7 |
-
<h2>¿Por qué es tan popular Go Down Deh? </h2>
|
8 |
-
<p>Go Down Deh se ha convertido en una sensación global desde su lanzamiento. La canción ha encabezado las listas en varios países, incluyendo Jamaica, Canadá, Reino Unido, Estados Unidos y Australia. La canción también se ha transmitido más de 100 millones de veces en Spotify y YouTube. La canción ha recibido críticas positivas de críticos y fans por igual, que elogiaron su pegadizo gancho, enérgico ritmo, y letras infecciosas. La canción también ha aparecido en varias plataformas de medios, como TikTok, Instagram y Netflix. La canción también ha sido interpretada en vivo por los artistas en varios eventos, como los Premios BET y Reggae Sumfest.</p>
|
9 |
-
<h2>How to download the Go Down Deh mp3 pagalworld ringtone?</h2>
<p>If you want Go Down Deh as your ringtone, you can download it easily from pagalworld.com, a website that offers free ringtones for Android and iOS devices. Here are the steps to follow:</p>
<ol>
<li>Go to <a href="( 1 )">pagalworld.com</a> and search for Go Down Deh in the search bar.</li>
<li>Select the song from the search results.</li>
<li>Click the download button and wait for the file to be saved to your device.</li>
<li>Go to your phone's settings and select Sound &amp; Notification.</li>
<li>Select Ringtone and browse to the file you downloaded.</li>
<li>Set Go Down Deh as your ringtone and enjoy!</li>
</ol>
<h2>What are the benefits of downloading the Go Down Deh mp3 pagalworld ringtone?</h2>
<p>There are many benefits to having Go Down Deh as your ringtone. Here are some of them:</p>
<ul>
<li>You can show your love for Spice, Sean Paul, and Shaggy by supporting their music.</li>
<li>You can liven up your phone with a unique, trendy ringtone that stands out from the crowd.</li>
<li>You can feel good every time your phone rings with a positive, uplifting song that makes you want to dance.</li>
<li>You can share your taste in music with friends and family by playing the song for them.</li>
<li>You can learn more about Jamaican culture and language by listening to the song's lyrics and slang.</li>
</ul>
<h2>What are some alternatives to the Go Down Deh mp3 pagalworld ringtone?</h2>
<p>If you are looking for other ringtones similar to Go Down Deh, you can try these options:</p>
<table>
<tr>
<th>Song</th>
<th>Artist</th>
<th>Description</th>
</tr>
<tr>
<td>Temperature</td>
<td>Sean Paul</td>
<td>A classic dancehall song from Sean Paul featuring his signature fast-paced rap and a catchy chorus.</td>
</tr>
<tr>
<td>It Wasn't Me</td>
<td>Shaggy feat. Rikrok</td>
<td>A humorous, catchy song by Shaggy that tells the story of a man caught cheating on his girlfriend.</td>
</tr>
<tr>
<td>So Mi Like It</td>
<td>Spice</td>
<td>A bold, confident song by Spice that shows off her vocal skills and attitude.</td>
</tr>
<tr>
<td>One Dance</td>
<td>Drake feat. Wizkid and Kyla</td>
<td>A smooth, groovy song by Drake that blends dancehall, Afrobeats, and UK funky.</td>
</tr>
<tr>
<td>Cheap Thrills</td>
<td>Sia feat. Sean Paul</td>
<td>A fun, upbeat song by Sia, featuring a rap from Sean Paul, that encourages listeners to enjoy life without spending money.</td>
</tr>
</table>
<h2>Conclusion</h2>
<p>Go Down Deh is a great song to have as your ringtone if you like dancehall music and want to add some spice to your phone. It is easy to download from pagalworld.com, and it has plenty of benefits for your mood and personality. If you want to explore other options, you can also check out the alternatives suggested above. Whatever you choose, we hope you enjoy your ringtone and have a great day!</p>
<h2>FAQs</h2>
<h3>Q: Who wrote Go Down Deh?</h3>
<p>A: Go Down Deh was written by Spice, Sean Paul, Shaggy, Costi Ionita, Shane Hoosong, Breyan Isaac, and Gheorghe Constantin Cristinel.</p>
<h3>Q: What does Go Down Deh mean?</h3>
<p>A: Go Down Deh is a Jamaican slang phrase that means "go down there" or "get down". It is often used in dancehall songs to refer to dancing or sexual activity.</p>
<h3>Q: Where can I watch the Go Down Deh video?</h3>
<p>A: You can watch the official Go Down Deh video on YouTube. It shows Spice, Sean Paul, and Shaggy dancing in a tropical setting with colorful outfits and props.</p>
<h3>Q: How can I support Spice, Sean Paul, and Shaggy?</h3>
<p>A: You can support Spice, Sean Paul, and Shaggy by streaming their music on Spotify, Apple Music, or other platforms. You can also follow them on social media, buy their merchandise, or attend their concerts.</p>
<h3>Q: What are some other songs by Spice, Sean Paul, and Shaggy?</h3>
<p>A: Some other songs by Spice are Frenz, Cool It, Tables Turn, and Black Hypocrisy. Some other songs by Sean Paul are Get Busy, No Lie, Mad Love, and She Doesn't Mind. Some other songs by Shaggy are Angel, Boombastic, and Hey Sexy Lady.</p>
spaces/Benson/text-generation/Examples/Descargar Gratis Poppy Playtime.md
DELETED
@@ -1,66 +0,0 @@
<h1>Poppy Playtime Free Download: A Horror Game That Will Make You Scream</h1>
<p>If you are a fan of horror games, you may have heard of Poppy Playtime, a new indie game that has taken the internet by storm. Poppy Playtime is a horror-puzzle adventure that puts you in the shoes of an intruder exploring an abandoned toy factory, where vengeful toys are waiting to catch you. In this article, we will tell you everything you need to know about Poppy Playtime and how you can download it for free.</p>
<h2>poppy playtime free download</h2><br /><p><b><b>DOWNLOAD</b> 🗸🗸🗸 <a href="https://bltlly.com/2v6JVn">https://bltlly.com/2v6JVn</a></b></p><br /><br />
<h2>What is Poppy Playtime?</h2>
<p>Poppy Playtime is a game developed by Mob Entertainment, a small studio that has created a unique and terrifying experience. The game was released on October 12, 2021 on Steam and has received positive reviews from players and critics alike. The game is still in development, and only the first chapter is available so far. However, the developers have promised to release more chapters in the future, each with a different toy as the main antagonist.</p>
<h2>Why is it popular with horror fans?</h2>
<p>Poppy Playtime has gained popularity among horror fans for several reasons. First, the game has a captivating story and setting that draw you in from the start. You play as an unnamed protagonist who enters the factory of Playtime Co., a once-successful toy manufacturer that mysteriously shut down after all of its employees disappeared. As you explore the dark, eerie facility, you encounter toys that were once friendly and cheerful but have become twisted and murderous. You also uncover clues and secrets that reveal what happened to the factory and its workers.</p>
<p>The game also delivers a high level of horror and suspense that will keep you on edge throughout. It does not rely on cheap jump scares or loud noises, but instead builds anticipation and dread through its ambient music and sound design. You never know when or where a toy will appear or attack, so you have to stay constantly alert and cautious. The game also has some disturbing, gruesome scenes that will make your skin crawl.</p>
<h2>How can you download it for free?</h2>
<p>If you are interested in playing Poppy Playtime, you may be wondering how to download it for free. There are several ways to do so. One is to use the free option on Steam, which lets you play the first chapter of the game without paying anything. However, this option does not include future updates or DLCs, so you will have to buy those separately if you want to keep playing.</p>
<p>Another way is to use a third-party website that offers free Poppy Playtime downloads. Many websites claim to provide them, but not all of them are safe or reliable. Some may contain viruses or malware that can damage your computer or steal your personal information. Some may also have broken or outdated links that do not work properly. You should therefore be careful when using these websites: always scan files before opening them, and read other users' reviews and comments to see whether they have had problems with the website or the download.</p>
<h2>The story and setting of Poppy Playtime</h2>
<p>As mentioned above, Poppy Playtime is set in the Playtime Co. factory, a place where toys were created and brought to life by the power of imagination. The factory was founded by Huggy Wuggy, a blue furry monster who loved to hug everyone and everything. He was joined by his friends, such as Poppy, a pink-haired doll who was the star of the show; Banny, a yellow rabbit who was always cheerful and energetic; Boop, a green robot who was smart and helpful; and Kissy Missy, a purple cat who was daring and flirtatious.</p>
<p>However, something went wrong at the factory, and all of the toys turned evil and hostile. They began attacking and killing the employees, who disappeared or became part of their twisted experiments. The factory was shut down and abandoned, and no one dared to enter it again.</p>
<p>You play as an investigator who is curious about the mystery of the factory. You sneak into the facility at night, hoping to find some answers and evidence. However, you soon realize that you are not alone, and that the toys are still alive and hungry for blood. You have to find a way out before they catch you.</p>
<h2>The gameplay and features of Poppy Playtime</h2>
<p>Poppy Playtime is a first-person horror-puzzle game that combines exploration, stealth, and action. You use your GrabPack to interact with objects, solve puzzles, hack electrical circuits, and grab anything from a distance. You can also use it to defend yourself against the toys by throwing objects at them or pulling them away from you. However, you have to be careful not to make too much noise or movement, or they will hear or see you.</p>
<p>The same horror and suspense elements run through the gameplay itself: ambient music and sound design build dread rather than cheap jump scares, and you never know when or where a toy will strike.</p>
<h2>The pros and cons of Poppy Playtime</h2>
<p>Poppy Playtime is not a perfect game, but its pros and cons are worth weighing before you play. Here are some of them:</p>
<table>
<tr>
<th>Pros</th>
<th>Cons</th>
</tr>
<tr>
<td>The graphics and sound design are amazing and immersive.</td>
<td>The game is very short and has only one chapter so far.</td>
</tr>
<tr>
<td>The story and setting are captivating and intriguing.</td>
<td>The game is very linear and does not have much replay value.</td>
</tr>
<tr>
<td>The gameplay and features are unique and fun.</td>
<td>The game is very easy and does not offer much challenge or difficulty.</td>
</tr>
<tr>
<td>The horror and suspense elements are effective and scary.</td>
<td>The game is not suitable for children or people who are sensitive to blood or violence.</td>
</tr>
</table>
<h2>FAQs</h2>
<h3>How many chapters does Poppy Playtime have?</h3>
<p>Poppy Playtime currently has only one chapter available, called "A Tight Squeeze". The developers have announced that they are working on more chapters, each with a different toy as the main antagonist, but they have not yet revealed release dates or chapter names.</p>
<h3>Is Poppy Playtime multiplayer?</h3>
<p>No, Poppy Playtime is a single-player game. You cannot play with other players online or offline. However, you can watch other players' videos or streams on YouTube or Twitch, or share your own gameplay experiences with others on social media or forums.</p>
<h3>Is Poppy Playtime suitable for children?</h3>
<p>No, Poppy Playtime is not suitable for children or for people who are sensitive to blood or violence. The game has some disturbing, gruesome scenes that could frighten or traumatize younger or more impressionable audiences, as well as mature themes and language that may not be appropriate for children. The game is rated M for Mature by the ESRB and 18+ by PEGI.</p>
<h3>Who is the developer of Poppy Playtime?</h3>
<p>Poppy Playtime is developed by Mob Entertainment, a small indie studio consisting of just two people: H2O Delirious and Cartoonz. H2O Delirious is a popular YouTube gamer with over 13 million subscribers on his channel. Cartoonz is also a YouTube gamer, with over 4 million subscribers on his channel. The two are friends and collaborators who created Poppy Playtime as their passion project.</p>
<h3>What are some games similar to Poppy Playtime?</h3>
<p>If you like Poppy Playtime, you may also enjoy other horror games with similar themes or features. Some examples are:</p>
<ul>
<li>Bendy and the Ink Machine: a horror game set in an abandoned animation studio, where you face ink monsters that were once cartoon characters.</li>
<li>Little Nightmares: a horror game set in a twisted world, where you must escape grotesque creatures that want to eat you.</li>
</ul>
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/specifiers.py
DELETED
@@ -1,802 +0,0 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

import abc
import functools
import itertools
import re
import warnings
from typing import (
    Callable,
    Dict,
    Iterable,
    Iterator,
    List,
    Optional,
    Pattern,
    Set,
    Tuple,
    TypeVar,
    Union,
)

from .utils import canonicalize_version
from .version import LegacyVersion, Version, parse

ParsedVersion = Union[Version, LegacyVersion]
UnparsedVersion = Union[Version, LegacyVersion, str]
VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
CallableOperator = Callable[[ParsedVersion, str], bool]


class InvalidSpecifier(ValueError):
    """
    An invalid specifier was found, users should refer to PEP 440.
    """


class BaseSpecifier(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def __str__(self) -> str:
        """
        Returns the str representation of this Specifier like object. This
        should be representative of the Specifier itself.
        """

    @abc.abstractmethod
    def __hash__(self) -> int:
        """
        Returns a hash value for this Specifier like object.
        """

    @abc.abstractmethod
    def __eq__(self, other: object) -> bool:
        """
        Returns a boolean representing whether or not the two Specifier like
        objects are equal.
        """

    @abc.abstractproperty
    def prereleases(self) -> Optional[bool]:
        """
        Returns whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        """
        Sets whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @abc.abstractmethod
    def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
        """
        Determines if the given item is contained within this specifier.
        """

    @abc.abstractmethod
    def filter(
        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
    ) -> Iterable[VersionTypeVar]:
        """
        Takes an iterable of items and filters them so that only items which
        are contained within this specifier are allowed in it.
        """


class _IndividualSpecifier(BaseSpecifier):

    _operators: Dict[str, str] = {}
    _regex: Pattern[str]

    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
        match = self._regex.search(spec)
        if not match:
            raise InvalidSpecifier(f"Invalid specifier: '{spec}'")

        self._spec: Tuple[str, str] = (
            match.group("operator").strip(),
            match.group("version").strip(),
        )

        # Store whether or not this Specifier should accept prereleases
        self._prereleases = prereleases

    def __repr__(self) -> str:
        pre = (
            f", prereleases={self.prereleases!r}"
            if self._prereleases is not None
            else ""
        )

        return f"<{self.__class__.__name__}({str(self)!r}{pre})>"

    def __str__(self) -> str:
        return "{}{}".format(*self._spec)

    @property
    def _canonical_spec(self) -> Tuple[str, str]:
        return self._spec[0], canonicalize_version(self._spec[1])

    def __hash__(self) -> int:
        return hash(self._canonical_spec)

    def __eq__(self, other: object) -> bool:
        if isinstance(other, str):
            try:
                other = self.__class__(str(other))
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._canonical_spec == other._canonical_spec

    def _get_operator(self, op: str) -> CallableOperator:
        operator_callable: CallableOperator = getattr(
            self, f"_compare_{self._operators[op]}"
        )
        return operator_callable

    def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
        if not isinstance(version, (LegacyVersion, Version)):
            version = parse(version)
        return version

    @property
    def operator(self) -> str:
        return self._spec[0]

    @property
    def version(self) -> str:
        return self._spec[1]

    @property
    def prereleases(self) -> Optional[bool]:
        return self._prereleases

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value

    def __contains__(self, item: str) -> bool:
        return self.contains(item)

    def contains(
        self, item: UnparsedVersion, prereleases: Optional[bool] = None
    ) -> bool:

        # Determine if prereleases are to be allowed or not.
        if prereleases is None:
            prereleases = self.prereleases

        # Normalize item to a Version or LegacyVersion, this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")``
        normalized_item = self._coerce_version(item)

        # Determine if we should be supporting prereleases in this specifier
        # or not, if we do not support prereleases than we can short circuit
        # logic if this version is a prereleases.
        if normalized_item.is_prerelease and not prereleases:
            return False

        # Actually do the comparison to determine if this item is contained
        # within this Specifier or not.
        operator_callable: CallableOperator = self._get_operator(self.operator)
        return operator_callable(normalized_item, self.version)

    def filter(
        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
    ) -> Iterable[VersionTypeVar]:

        yielded = False
        found_prereleases = []

        kw = {"prereleases": prereleases if prereleases is not None else True}

        # Attempt to iterate over all the values in the iterable and if any of
        # them match, yield them.
        for version in iterable:
            parsed_version = self._coerce_version(version)

            if self.contains(parsed_version, **kw):
                # If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
                # else matches this specifier.
                if parsed_version.is_prerelease and not (
                    prereleases or self.prereleases
                ):
                    found_prereleases.append(version)
                # Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
                else:
                    yielded = True
                    yield version

        # Now that we've iterated over everything, determine if we've yielded
        # any values, and if we have not and we have any prereleases stored up
        # then we will go ahead and yield the prereleases.
        if not yielded and found_prereleases:
            for version in found_prereleases:
                yield version


class LegacySpecifier(_IndividualSpecifier):

    _regex_str = r"""
        (?P<operator>(==|!=|<=|>=|<|>))
        \s*
        (?P<version>
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        """

    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    _operators = {
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
    }

    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
        super().__init__(spec, prereleases)

        warnings.warn(
            "Creating a LegacyVersion has been deprecated and will be "
            "removed in the next major release",
            DeprecationWarning,
        )

    def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
        if not isinstance(version, LegacyVersion):
            version = LegacyVersion(str(version))
        return version

    def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
        return prospective == self._coerce_version(spec)

    def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
        return prospective != self._coerce_version(spec)

    def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
        return prospective <= self._coerce_version(spec)

    def _compare_greater_than_equal(
        self, prospective: LegacyVersion, spec: str
    ) -> bool:
        return prospective >= self._coerce_version(spec)

    def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
        return prospective < self._coerce_version(spec)

    def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
        return prospective > self._coerce_version(spec)


def _require_version_compare(
    fn: Callable[["Specifier", ParsedVersion, str], bool]
) -> Callable[["Specifier", ParsedVersion, str], bool]:
    @functools.wraps(fn)
    def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
        if not isinstance(prospective, Version):
            return False
        return fn(self, prospective, spec)

    return wrapped


class Specifier(_IndividualSpecifier):

    _regex_str = r"""
        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
        (?P<version>
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)            # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)               # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
            |
            (?:
                # All other operators only allow a sub set of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?<!==|!=|~=)         # We have special cases for these
                                      # operators so we want to make sure they
                                      # don't match here.

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
        )
        """

    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    _operators = {
        "~=": "compatible",
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
        "===": "arbitrary",
    }

    @_require_version_compare
    def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:

        # Compatible releases have an equivalent combination of >= and ==. That
        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
        # implement this in terms of the other specifiers instead of
        # implementing it ourselves. The only thing we need to do is construct
        # the other specifiers.

        # We want everything but the last item in the version, but we want to
        # ignore suffix segments.
        prefix = ".".join(
            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
        )

        # Add the prefix notation to the end of our string
        prefix += ".*"

        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
            prospective, prefix
        )

    @_require_version_compare
    def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:

        # We need special logic to handle prefix matching
        if spec.endswith(".*"):
            # In the case of prefix matching we want to ignore local segment.
            prospective = Version(prospective.public)
            # Split the spec out by dots, and pretend that there is an implicit
            # dot in between a release segment and a pre-release segment.
            split_spec = _version_split(spec[:-2])  # Remove the trailing .*

            # Split the prospective version out by dots, and pretend that there
            # is an implicit dot in between a release segment and a pre-release
            # segment.
            split_prospective = _version_split(str(prospective))

            # Shorten the prospective version to be the same length as the spec
            # so that we can determine if the specifier is a prefix of the
            # prospective version or not.
            shortened_prospective = split_prospective[: len(split_spec)]

            # Pad out our two sides with zeros so that they both equal the same
            # length.
            padded_spec, padded_prospective = _pad_version(
                split_spec, shortened_prospective
            )

            return padded_prospective == padded_spec
        else:
            # Convert our spec string into a Version
            spec_version = Version(spec)

            # If the specifier does not have a local segment, then we want to
            # act as if the prospective version also does not have a local
            # segment.
            if not spec_version.local:
                prospective = Version(prospective.public)

            return prospective == spec_version

    @_require_version_compare
    def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
        return not self._compare_equal(prospective, spec)

    @_require_version_compare
    def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:

        # NB: Local version identifiers are NOT permitted in the version
        # specifier, so local version labels can be universally removed from
        # the prospective version.
        return Version(prospective.public) <= Version(spec)

    @_require_version_compare
    def _compare_greater_than_equal(
        self, prospective: ParsedVersion, spec: str
    ) -> bool:

        # NB: Local version identifiers are NOT permitted in the version
        # specifier, so local version labels can be universally removed from
        # the prospective version.
        return Version(prospective.public) >= Version(spec)

    @_require_version_compare
    def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:

        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec_str)

        # Check to see if the prospective version is less than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective < spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes is a pre-release version, that we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1 should
        # not match 3.1.dev0, but should match 3.0.dev0).
        if not spec.is_prerelease and prospective.is_prerelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that prospective version is both
        # less than the spec version *and* it's not a pre-release of the same
        # version in the spec.
        return True

    @_require_version_compare
    def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:

        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec_str)

        # Check to see if the prospective version is greater than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective > spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes is a post-release version, that we do not accept
        # post-release versions for the version mentioned in the specifier
        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
        if not spec.is_postrelease and prospective.is_postrelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
        if prospective.local is not None:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that prospective version is both
        # greater than the spec version *and* it's not a pre-release of the
        # same version in the spec.
        return True

    def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
        return str(prospective).lower() == str(spec).lower()

    @property
    def prereleases(self) -> bool:

        # If there is an explicit prereleases set for this, then we'll just
        # blindly use that.
        if self._prereleases is not None:
            return self._prereleases

        # Look at all of our specifiers and determine if they are inclusive
        # operators, and if they are if they are including an explicit
        # prerelease.
        operator, version = self._spec
        if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*, if it does we
            # want to remove before parsing.
            if operator == "==" and version.endswith(".*"):
                version = version[:-2]

            # Parse the version, and if it is a pre-release than this
            # specifier allows pre-releases.
            if parse(version).is_prerelease:
                return True

        return False

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value


_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")


def _version_split(version: str) -> List[str]:
    result: List[str] = []
    for item in version.split("."):
        match = _prefix_regex.search(item)
        if match:
            result.extend(match.groups())
        else:
            result.append(item)
    return result


def _is_not_suffix(segment: str) -> bool:
    return not any(
        segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
    )


def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
    left_split, right_split = [], []

    # Get the release segment of our versions
    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))

    # Get the rest of our versions
    left_split.append(left[len(left_split[0]) :])
    right_split.append(right[len(right_split[0]) :])

    # Insert our padding
    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))

    return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))


class SpecifierSet(BaseSpecifier):
    def __init__(
        self, specifiers: str = "", prereleases: Optional[bool] = None
    ) -> None:

        # Split on , to break each individual specifier into it's own item, and
        # strip each item to remove leading/trailing whitespace.
        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]

        # Parsed each individual specifier, attempting first to make it a
        # Specifier and falling back to a LegacySpecifier.
        parsed: Set[_IndividualSpecifier] = set()
        for specifier in split_specifiers:
            try:
                parsed.add(Specifier(specifier))
            except InvalidSpecifier:
                parsed.add(LegacySpecifier(specifier))

        # Turn our parsed specifiers into a frozen set and save them for later.
        self._specs = frozenset(parsed)

        # Store our prereleases value so we can use it later to determine if
        # we accept prereleases or not.
        self._prereleases = prereleases

    def __repr__(self) -> str:
        pre = (
            f", prereleases={self.prereleases!r}"
            if self._prereleases is not None
            else ""
        )

        return f"<SpecifierSet({str(self)!r}{pre})>"

    def __str__(self) -> str:
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self) -> int:
        return hash(self._specs)

    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
        if isinstance(other, str):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease "
                "overrides."
            )

        return specifier

    def __eq__(self, other: object) -> bool:
        if isinstance(other, (str, _IndividualSpecifier)):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __len__(self) -> int:
        return len(self._specs)

    def __iter__(self) -> Iterator[_IndividualSpecifier]:
        return iter(self._specs)

    @property
    def prereleases(self) -> Optional[bool]:

        # If we have been given an explicit prerelease modifier, then we'll
        # pass that through here.
        if self._prereleases is not None:
            return self._prereleases

        # If we don't have any specifiers, and we don't have a forced value,
        # then we'll just return None since we don't know if this should have
        # pre-releases or not.
        if not self._specs:
            return None

        # Otherwise we'll see if any of the given specifiers accept
        # prereleases, if any of them do we'll return True, otherwise False.
        return any(s.prereleases for s in self._specs)

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value

    def __contains__(self, item: UnparsedVersion) -> bool:
        return self.contains(item)

    def contains(
        self, item: UnparsedVersion, prereleases: Optional[bool] = None
    ) -> bool:

        # Ensure that our item is a Version or LegacyVersion instance.
        if not isinstance(item, (LegacyVersion, Version)):
            item = parse(item)

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # We can determine if we're going to allow pre-releases by looking to
        # see if any of the underlying items supports them. If none of them do
        # and this item is a pre-release then we do not allow it and we can
        # short circuit that here.
        # Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
        if not prereleases and item.is_prerelease:
            return False

        # We simply dispatch to the underlying specs here to make sure that the
        # given version is contained within all of them.
        # Note: This use of all() here means that an empty set of specifiers
        # will always return True, this is an explicit design decision.
        return all(s.contains(item, prereleases=prereleases) for s in self._specs)

    def filter(
        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
    ) -> Iterable[VersionTypeVar]:

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If we have any specifiers, then we want to wrap our iterable in the
        # filter method for each one, this will act as a logical AND amongst
        # each specifier.
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iterable
        # If we do not have any specifiers, then we need to have a rough filter
        # which will filter out any pre-releases, unless there are no final
        # releases, and which will filter out LegacyVersion in general.
        else:
            filtered: List[VersionTypeVar] = []
            found_prereleases: List[VersionTypeVar] = []

            item: UnparsedVersion
            parsed_version: Union[Version, LegacyVersion]

            for item in iterable:
                # Ensure that we some kind of Version class for this item.
                if not isinstance(item, (LegacyVersion, Version)):
                    parsed_version = parse(item)
                else:
                    parsed_version = item

                # Filter out any item which is parsed as a LegacyVersion
                if isinstance(parsed_version, LegacyVersion):
                    continue

                # Store any item which is a pre-release for later unless we've
                # already found a final version or we are accepting prereleases
                if parsed_version.is_prerelease and not prereleases:
                    if not filtered:
                        found_prereleases.append(item)
                else:
                    filtered.append(item)

            # If we've found no items except for pre-releases, then we'll go
            # ahead and use the pre-releases
            if not filtered and found_prereleases and prereleases is None:
                return found_prereleases

            return filtered
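
For context when reading this deleted module, here is a minimal usage sketch of the PEP 440 specifier API it implements. It imports from the public `packaging` distribution (the canonical upstream of this vendored copy) rather than from `pip._vendor`, which is not meant to be imported directly; the version strings below are illustrative.

# Minimal sketch of the PEP 440 specifier API implemented above, imported
# from the public `packaging` package rather than pip's vendored copy.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

specs = SpecifierSet(">=1.0,<2.0")

print("1.5" in specs)            # True  -- contains() accepts raw version strings
print(Version("2.0") in specs)   # False -- the upper bound is exclusive
print("1.1a1" in specs)          # False -- prereleases are excluded by default
print(specs.contains("1.1a1", prereleases=True))  # True when explicitly allowed

# filter() applies the whole set as a logical AND over an iterable.
print(list(specs.filter(["0.9", "1.0", "1.4.2", "2.0", "1.1a1"])))  # ['1.0', '1.4.2']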
spaces/BigSalmon/TestAnyGPTModel/app.py
DELETED
@@ -1,67 +0,0 @@
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

first = """It is a wonderful day to"""


name_of_model = st.text_input("Name of the model you want to run", "gpt2")

@st.cache(allow_output_mutation=True)
def get_model(name_of_model):
    # Load the tokenizer and model for the requested checkpoint.
    tokenizer = AutoTokenizer.from_pretrained(name_of_model)
    model = AutoModelForCausalLM.from_pretrained(name_of_model)
    return model, tokenizer

model, tokenizer = get_model(name_of_model)
temp = st.sidebar.slider("Temperature", 0.7, 1.5)
number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 50)
lengths = st.sidebar.slider("Length", 3, 500)
bad_words = st.text_input("Words You Do Not Want Generated", " core lemon height time ")
logs_outputs = st.sidebar.slider("Logit Outputs", 50, 300)

def run_generate(text, bad_words):
    yo = []
    input_ids = tokenizer.encode(text, return_tensors='pt')
    res = len(tokenizer.encode(text))
    # Build bad_words_ids with a leading space so the banned tokens match
    # mid-sentence occurrences of each word.
    bad_words = bad_words.split()
    bad_word_ids = []
    for bad_word in bad_words:
        bad_word = " " + bad_word
        ids = tokenizer(bad_word).input_ids
        bad_word_ids.append(ids)
    sample_outputs = model.generate(
        input_ids,
        do_sample=True,
        max_length=res + lengths,
        min_length=res + lengths,
        top_k=50,
        temperature=temp,
        num_return_sequences=number_of_outputs,
        bad_words_ids=bad_word_ids,
    )
    for i in range(number_of_outputs):
        e = tokenizer.decode(sample_outputs[i])
        e = e.replace(text, "")
        yo.append(e)
    return yo

with st.form(key='my_form'):
    text = st.text_area(label='Enter sentence', value=first)
    submit_button = st.form_submit_button(label='Submit')
    submit_button2 = st.form_submit_button(label='Submit Log Probs')
    if submit_button:
        translated_text = run_generate(text, bad_words)
        st.write(translated_text if translated_text else "No translation found")
    if submit_button2:
        with torch.no_grad():
            text2 = str(text)
            text3 = tokenizer.encode(text2)
            myinput = torch.tensor([text3])
            # Single forward pass; take the logits for the token after the prompt.
            logits, past_key_values = model(myinput, past_key_values=None, return_dict=False)
            logits = logits[0, -1]
            best_logits, best_indices = logits.topk(logs_outputs)
            best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
            st.write(best_words)
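
The log-probs branch is the interesting part of this Space: it runs one forward pass and inspects the top-k logits for the next token. A standalone sketch of the same idea outside Streamlit follows; the model name and prompt are illustrative, not from the original Space.

# Standalone top-k next-token inspection with the same Hugging Face API.
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = "It is a wonderful day to"
input_ids = tokenizer.encode(prompt, return_tensors="pt")

with torch.no_grad():
    logits = model(input_ids).logits[0, -1]  # logits for the next token

top_logits, top_indices = logits.topk(10)
print([tokenizer.decode([i.item()]) for i in top_indices])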
spaces/CALM/Dashboard/perso/get_usernames.py
DELETED
@@ -1,14 +0,0 @@
import json

with open(
    "/mnt/storage/Documents/hugging_face/colaborative_hub_training/demo_neurips/training-transformers-together-dashboard/data/"
    "serializaledata_V2.json",
    "r",
) as f:
    serialized_data = json.load(f)

usernames = []
for item in serialized_data["points"][0]:
    usernames.append(item["profileId"])

print(usernames)
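
The script assumes a particular JSON layout. A hedged sketch of that assumed shape follows; only the `points` nesting and the `profileId` key come from the script itself, and the values are hypothetical.

# Hypothetical example of the structure this script expects.
example = {
    "points": [
        [
            {"profileId": "alice"},
            {"profileId": "bob"},
        ]
    ]
}
usernames = [item["profileId"] for item in example["points"][0]]
print(usernames)  # ['alice', 'bob']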
spaces/CVPR/LIVE/thrust/thrust/uninitialized_copy.h
DELETED
@@ -1,303 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */


/*! \file uninitialized_copy.h
 *  \brief Copy construction into a range of uninitialized elements from a source range
 */

#pragma once

#include <thrust/detail/config.h>
#include <thrust/detail/execution_policy.h>

namespace thrust
{


/*! \addtogroup copying
 *  \{
 */


/*! In \c thrust, the function \c thrust::device_new allocates memory for
 *  an object and then creates an object at that location by calling a constructor.
 *  Occasionally, however, it is useful to separate those two operations.
 *  If each iterator in the range <tt>[result, result + (last - first))</tt> points
 *  to uninitialized memory, then \p uninitialized_copy creates a copy of
 *  <tt>[first, last)</tt> in that range. That is, for each iterator \c i in
 *  the input, \p uninitialized_copy creates a copy of \c *i in the location pointed
 *  to by the corresponding iterator in the output range by \p ForwardIterator's
 *  \c value_type's copy constructor with *i as its argument.
 *
 *  The algorithm's execution is parallelized as determined by \p exec.
 *
 *  \param exec The execution policy to use for parallelization.
 *  \param first The first element of the input range to copy from.
 *  \param last The last element of the input range to copy from.
 *  \param result The first element of the output range to copy to.
 *  \return An iterator pointing to the last element of the output range.
 *
 *  \tparam DerivedPolicy The name of the derived execution policy.
 *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
 *          \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that takes
 *          a single argument whose type is \p InputIterator's \c value_type.
 *
 *  \pre \p first may equal \p result, but the range <tt>[first, last)</tt> and the range <tt>[result, result + (last - first))</tt> shall not overlap otherwise.
 *
 *  The following code snippet demonstrates how to use \p uninitialized_copy to initialize
 *  a range of uninitialized memory using the \p thrust::device execution policy for
 *  parallelization:
 *
 *  \code
 *  #include <thrust/uninitialized_copy.h>
 *  #include <thrust/device_malloc.h>
 *  #include <thrust/device_vector.h>
 *  #include <thrust/execution_policy.h>
 *
 *  struct Int
 *  {
 *    __host__ __device__
 *    Int(int x) : val(x) {}
 *    int val;
 *  };
 *  ...
 *  const int N = 137;
 *
 *  Int val(46);
 *  thrust::device_vector<Int> input(N, val);
 *  thrust::device_ptr<Int> array = thrust::device_malloc<Int>(N);
 *  thrust::uninitialized_copy(thrust::device, input.begin(), input.end(), array);
 *
 *  // Int x = array[i];
 *  // x.val == 46 for all 0 <= i < N
 *  \endcode
 *
 *  \see http://www.sgi.com/tech/stl/uninitialized_copy.html
 *  \see \c copy
 *  \see \c uninitialized_fill
 *  \see \c device_new
 *  \see \c device_malloc
 */
template<typename DerivedPolicy, typename InputIterator, typename ForwardIterator>
__host__ __device__
  ForwardIterator uninitialized_copy(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
                                     InputIterator first,
                                     InputIterator last,
                                     ForwardIterator result);


/*! In \c thrust, the function \c thrust::device_new allocates memory for
 *  an object and then creates an object at that location by calling a constructor.
 *  Occasionally, however, it is useful to separate those two operations.
 *  If each iterator in the range <tt>[result, result + (last - first))</tt> points
 *  to uninitialized memory, then \p uninitialized_copy creates a copy of
 *  <tt>[first, last)</tt> in that range. That is, for each iterator \c i in
 *  the input, \p uninitialized_copy creates a copy of \c *i in the location pointed
 *  to by the corresponding iterator in the output range by \p ForwardIterator's
 *  \c value_type's copy constructor with *i as its argument.
 *
 *  \param first The first element of the input range to copy from.
 *  \param last The last element of the input range to copy from.
 *  \param result The first element of the output range to copy to.
 *  \return An iterator pointing to the last element of the output range.
 *
 *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
 *          \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that takes
 *          a single argument whose type is \p InputIterator's \c value_type.
 *
 *  \pre \p first may equal \p result, but the range <tt>[first, last)</tt> and the range <tt>[result, result + (last - first))</tt> shall not overlap otherwise.
 *
 *  The following code snippet demonstrates how to use \p uninitialized_copy to initialize
 *  a range of uninitialized memory.
 *
 *  \code
 *  #include <thrust/uninitialized_copy.h>
 *  #include <thrust/device_malloc.h>
 *  #include <thrust/device_vector.h>
 *
 *  struct Int
 *  {
 *    __host__ __device__
 *    Int(int x) : val(x) {}
 *    int val;
 *  };
 *  ...
 *  const int N = 137;
 *
 *  Int val(46);
 *  thrust::device_vector<Int> input(N, val);
 *  thrust::device_ptr<Int> array = thrust::device_malloc<Int>(N);
 *  thrust::uninitialized_copy(input.begin(), input.end(), array);
 *
 *  // Int x = array[i];
 *  // x.val == 46 for all 0 <= i < N
 *  \endcode
 *
 *  \see http://www.sgi.com/tech/stl/uninitialized_copy.html
 *  \see \c copy
 *  \see \c uninitialized_fill
 *  \see \c device_new
 *  \see \c device_malloc
 */
template<typename InputIterator, typename ForwardIterator>
  ForwardIterator uninitialized_copy(InputIterator first,
                                     InputIterator last,
                                     ForwardIterator result);


/*! In \c thrust, the function \c thrust::device_new allocates memory for
 *  an object and then creates an object at that location by calling a constructor.
 *  Occasionally, however, it is useful to separate those two operations.
 *  If each iterator in the range <tt>[result, result + n)</tt> points
 *  to uninitialized memory, then \p uninitialized_copy_n creates a copy of
 *  <tt>[first, first + n)</tt> in that range. That is, for each iterator \c i in
 *  the input, \p uninitialized_copy_n creates a copy of \c *i in the location pointed
 *  to by the corresponding iterator in the output range by \p InputIterator's
 *  \c value_type's copy constructor with *i as its argument.
 *
 *  The algorithm's execution is parallelized as determined by \p exec.
 *
 *  \param exec The execution policy to use for parallelization.
 *  \param first The first element of the input range to copy from.
 *  \param n The number of elements to copy.
 *  \param result The first element of the output range to copy to.
 *  \return An iterator pointing to the last element of the output range.
 *
 *  \tparam DerivedPolicy The name of the derived execution policy.
 *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
 *  \tparam Size is an integral type.
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
 *          \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that takes
 *          a single argument whose type is \p InputIterator's \c value_type.
 *
 *  \pre \p first may equal \p result, but the range <tt>[first, first + n)</tt> and the range <tt>[result, result + n)</tt> shall not overlap otherwise.
 *
 *  The following code snippet demonstrates how to use \p uninitialized_copy to initialize
|
192 |
-
* a range of uninitialized memory using the \p thrust::device execution policy for
|
193 |
-
* parallelization:
|
194 |
-
*
|
195 |
-
* \code
|
196 |
-
* #include <thrust/uninitialized_copy.h>
|
197 |
-
* #include <thrust/device_malloc.h>
|
198 |
-
* #include <thrust/device_vector.h>
|
199 |
-
* #include <thrust/execution_policy.h>
|
200 |
-
*
|
201 |
-
* struct Int
|
202 |
-
* {
|
203 |
-
* __host__ __device__
|
204 |
-
* Int(int x) : val(x) {}
|
205 |
-
* int val;
|
206 |
-
* };
|
207 |
-
* ...
|
208 |
-
* const int N = 137;
|
209 |
-
*
|
210 |
-
* Int val(46);
|
211 |
-
* thrust::device_vector<Int> input(N, val);
|
212 |
-
* thrust::device_ptr<Int> array = thrust::device_malloc<Int>(N);
|
213 |
-
* thrust::uninitialized_copy_n(thrust::device, input.begin(), N, array);
|
214 |
-
*
|
215 |
-
* // Int x = array[i];
|
216 |
-
* // x.val == 46 for all 0 <= i < N
|
217 |
-
* \endcode
|
218 |
-
*
|
219 |
-
* \see http://www.sgi.com/tech/stl/uninitialized_copy.html
|
220 |
-
* \see \c uninitialized_copy
|
221 |
-
* \see \c copy
|
222 |
-
* \see \c uninitialized_fill
|
223 |
-
* \see \c device_new
|
224 |
-
* \see \c device_malloc
|
225 |
-
*/
|
226 |
-
template<typename DerivedPolicy, typename InputIterator, typename Size, typename ForwardIterator>
|
227 |
-
__host__ __device__
|
228 |
-
ForwardIterator uninitialized_copy_n(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
229 |
-
InputIterator first,
|
230 |
-
Size n,
|
231 |
-
ForwardIterator result);
|
232 |
-
|
233 |
-
|
234 |
-
/*! In \c thrust, the function \c thrust::device_new allocates memory for
|
235 |
-
* an object and then creates an object at that location by calling a constructor.
|
236 |
-
* Occasionally, however, it is useful to separate those two operations.
|
237 |
-
* If each iterator in the range <tt>[result, result + n)</tt> points
|
238 |
-
* to uninitialized memory, then \p uninitialized_copy_n creates a copy of
|
239 |
-
* <tt>[first, first + n)</tt> in that range. That is, for each iterator \c i in
|
240 |
-
* the input, \p uninitialized_copy_n creates a copy of \c *i in the location pointed
|
241 |
-
* to by the corresponding iterator in the output range by \p InputIterator's
|
242 |
-
* \c value_type's copy constructor with *i as its argument.
|
243 |
-
*
|
244 |
-
* \param first The first element of the input range to copy from.
|
245 |
-
* \param n The number of elements to copy.
|
246 |
-
* \param result The first element of the output range to copy to.
|
247 |
-
* \return An iterator pointing to the last element of the output range.
|
248 |
-
*
|
249 |
-
* \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
|
250 |
-
* \tparam Size is an integral type.
|
251 |
-
* \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
|
252 |
-
* \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that takes
|
253 |
-
* a single argument whose type is \p InputIterator's \c value_type.
|
254 |
-
*
|
255 |
-
* \pre \p first may equal \p result, but the range <tt>[first, first + n)</tt> and the range <tt>[result, result + n)</tt> shall not overlap otherwise.
|
256 |
-
*
|
257 |
-
* The following code snippet demonstrates how to use \p uninitialized_copy to initialize
|
258 |
-
* a range of uninitialized memory.
|
259 |
-
*
|
260 |
-
* \code
|
261 |
-
* #include <thrust/uninitialized_copy.h>
|
262 |
-
* #include <thrust/device_malloc.h>
|
263 |
-
* #include <thrust/device_vector.h>
|
264 |
-
*
|
265 |
-
* struct Int
|
266 |
-
* {
|
267 |
-
* __host__ __device__
|
268 |
-
* Int(int x) : val(x) {}
|
269 |
-
* int val;
|
270 |
-
* };
|
271 |
-
* ...
|
272 |
-
* const int N = 137;
|
273 |
-
*
|
274 |
-
* Int val(46);
|
275 |
-
* thrust::device_vector<Int> input(N, val);
|
276 |
-
* thrust::device_ptr<Int> array = thrust::device_malloc<Int>(N);
|
277 |
-
* thrust::uninitialized_copy_n(input.begin(), N, array);
|
278 |
-
*
|
279 |
-
* // Int x = array[i];
|
280 |
-
* // x.val == 46 for all 0 <= i < N
|
281 |
-
* \endcode
|
282 |
-
*
|
283 |
-
* \see http://www.sgi.com/tech/stl/uninitialized_copy.html
|
284 |
-
* \see \c uninitialized_copy
|
285 |
-
* \see \c copy
|
286 |
-
* \see \c uninitialized_fill
|
287 |
-
* \see \c device_new
|
288 |
-
* \see \c device_malloc
|
289 |
-
*/
|
290 |
-
template<typename InputIterator, typename Size, typename ForwardIterator>
|
291 |
-
ForwardIterator uninitialized_copy_n(InputIterator first,
|
292 |
-
Size n,
|
293 |
-
ForwardIterator result);
|
294 |
-
|
295 |
-
|
296 |
-
/*! \} // copying
|
297 |
-
*/
|
298 |
-
|
299 |
-
|
300 |
-
} // end thrust
|
301 |
-
|
302 |
-
#include <thrust/detail/uninitialized_copy.inl>
|
303 |
-
|
spaces/CVPR/monoscene_lite/monoscene/flosp.py
DELETED
@@ -1,41 +0,0 @@
import torch
import torch.nn as nn


class FLoSP(nn.Module):
    def __init__(self, scene_size, dataset, project_scale):
        super().__init__()
        self.scene_size = scene_size
        self.dataset = dataset
        self.project_scale = project_scale

    def forward(self, x2d, projected_pix, fov_mask):
        c, h, w = x2d.shape

        src = x2d.view(c, -1)
        zeros_vec = torch.zeros(c, 1).type_as(src)
        src = torch.cat([src, zeros_vec], 1)

        pix_x, pix_y = projected_pix[:, 0], projected_pix[:, 1]
        img_indices = pix_y * w + pix_x
        img_indices[~fov_mask] = h * w
        img_indices = img_indices.expand(c, -1).long()  # c, HWD
        src_feature = torch.gather(src, 1, img_indices)

        if self.dataset == "NYU":
            x3d = src_feature.reshape(
                c,
                self.scene_size[0] // self.project_scale,
                self.scene_size[2] // self.project_scale,
                self.scene_size[1] // self.project_scale,
            )
            x3d = x3d.permute(0, 1, 3, 2)
        elif self.dataset == "kitti":
            x3d = src_feature.reshape(
                c,
                self.scene_size[0] // self.project_scale,
                self.scene_size[1] // self.project_scale,
                self.scene_size[2] // self.project_scale,
            )

        return x3d
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/misc.py
DELETED
@@ -1,717 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.

Mostly copy-paste from torchvision references.
"""
import colorsys
import datetime
import functools
import io
import json
import os
import pickle
import subprocess
import time
from collections import OrderedDict, defaultdict, deque
from typing import List, Optional

import numpy as np
import torch
import torch.distributed as dist

# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
from torch import Tensor

__torchvision_need_compat_flag = float(torchvision.__version__.split(".")[1]) < 7
if __torchvision_need_compat_flag:
    from torchvision.ops import _new_empty_tensor
    from torchvision.ops.misc import _output_size


class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        if d.shape[0] == 0:
            return 0
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        if os.environ.get("SHILONG_AMP", None) == "1":
            eps = 1e-4
        else:
            eps = 1e-6
        return self.total / (self.count + eps)

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )


@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a process group based on gloo backend, containing all the ranks
    The result is cached.
    """

    if dist.get_backend() == "nccl":
        return dist.new_group(backend="gloo")

    return dist.group.WORLD


def all_gather_cpu(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """

    world_size = get_world_size()
    if world_size == 1:
        return [data]

    cpu_group = _get_global_gloo_group()

    buffer = io.BytesIO()
    torch.save(data, buffer)
    data_view = buffer.getbuffer()
    device = "cuda" if cpu_group is None else "cpu"
    tensor = torch.ByteTensor(data_view).to(device)

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long)
    size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)]
    if cpu_group is None:
        dist.all_gather(size_list, local_size)
    else:
        print("gathering on cpu")
        dist.all_gather(size_list, local_size, group=cpu_group)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    assert isinstance(local_size.item(), int)
    local_size = int(local_size.item())

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=device)
        tensor = torch.cat((tensor, padding), dim=0)
    if cpu_group is None:
        dist.all_gather(tensor_list, tensor)
    else:
        dist.all_gather(tensor_list, tensor, group=cpu_group)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        tensor = torch.split(tensor, [size, max_size - size], dim=0)[0]
        buffer = io.BytesIO(tensor.cpu().numpy())
        obj = torch.load(buffer)
        data_list.append(obj)

    return data_list


def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """

    if os.getenv("CPU_REDUCE") == "1":
        return all_gather_cpu(data)

    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list


def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict


class MetricLogger(object):
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            if meter.count > 0:
                loss_str.append("{}: {}".format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None, logger=None):
        if logger is None:
            print_func = print
        else:
            print_func = logger.info

        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt="{avg:.4f}")
        data_time = SmoothedValue(fmt="{avg:.4f}")
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        if torch.cuda.is_available():
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                    "max mem: {memory:.0f}",
                ]
            )
        else:
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                ]
            )
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print_func(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                            memory=torch.cuda.max_memory_allocated() / MB,
                        )
                    )
                else:
                    print_func(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                        )
                    )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print_func(
            "{} Total time: {} ({:.4f} s / it)".format(
                header, total_time_str, total_time / len(iterable)
            )
        )


def get_sha():
    cwd = os.path.dirname(os.path.abspath(__file__))

    def _run(command):
        return subprocess.check_output(command, cwd=cwd).decode("ascii").strip()

    sha = "N/A"
    diff = "clean"
    branch = "N/A"
    try:
        sha = _run(["git", "rev-parse", "HEAD"])
        subprocess.check_output(["git", "diff"], cwd=cwd)
        diff = _run(["git", "diff-index", "HEAD"])
        diff = "has uncommited changes" if diff else "clean"
        branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"])
    except Exception:
        pass
    message = f"sha: {sha}, status: {diff}, branch: {branch}"
    return message


def collate_fn(batch):
    batch = list(zip(*batch))
    batch[0] = nested_tensor_from_tensor_list(batch[0])
    return tuple(batch)


def _max_by_axis(the_list):
    # type: (List[List[int]]) -> List[int]
    maxes = the_list[0]
    for sublist in the_list[1:]:
        for index, item in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes


class NestedTensor(object):
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask
        if mask == "auto":
            self.mask = torch.zeros_like(tensors).to(tensors.device)
            if self.mask.dim() == 3:
                self.mask = self.mask.sum(0).to(bool)
            elif self.mask.dim() == 4:
                self.mask = self.mask.sum(1).to(bool)
            else:
                raise ValueError(
                    "tensors dim must be 3 or 4 but {}({})".format(
                        self.tensors.dim(), self.tensors.shape
                    )
                )

    def imgsize(self):
        res = []
        for i in range(self.tensors.shape[0]):
            mask = self.mask[i]
            maxH = (~mask).sum(0).max()
            maxW = (~mask).sum(1).max()
            res.append(torch.Tensor([maxH, maxW]))
        return res

    def to(self, device):
        # type: (Device) -> NestedTensor  # noqa
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            assert mask is not None
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    def to_img_list_single(self, tensor, mask):
        assert tensor.dim() == 3, "dim of tensor should be 3 but {}".format(tensor.dim())
        maxH = (~mask).sum(0).max()
        maxW = (~mask).sum(1).max()
        img = tensor[:, :maxH, :maxW]
        return img

    def to_img_list(self):
        """remove the padding and convert to img list

        Returns:
            [type]: [description]
        """
        if self.tensors.dim() == 3:
            return self.to_img_list_single(self.tensors, self.mask)
        else:
            res = []
            for i in range(self.tensors.shape[0]):
                tensor_i = self.tensors[i]
                mask_i = self.mask[i]
                res.append(self.to_img_list_single(tensor_i, mask_i))
            return res

    @property
    def device(self):
        return self.tensors.device

    def decompose(self):
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)

    @property
    def shape(self):
        return {"tensors.shape": self.tensors.shape, "mask.shape": self.mask.shape}


def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)

        # TODO make it support different-sized images
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], : img.shape[2]] = False
    else:
        raise ValueError("not supported")
    return NestedTensor(tensor, mask)


# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
    max_size = []
    for i in range(tensor_list[0].dim()):
        max_size_i = torch.max(
            torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
        ).to(torch.int64)
        max_size.append(max_size_i)
    max_size = tuple(max_size)

    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
        padded_imgs.append(padded_img)

        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
        padded_masks.append(padded_mask.to(torch.bool))

    tensor = torch.stack(padded_imgs)
    mask = torch.stack(padded_masks)

    return NestedTensor(tensor, mask=mask)


def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__

    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop("force", False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)


def init_distributed_mode(args):
    if "WORLD_SIZE" in os.environ and os.environ["WORLD_SIZE"] != "":  # 'RANK' in os.environ and
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = args.local_rank = int(os.environ["LOCAL_RANK"])

        # launch by torch.distributed.launch
        # Single node
        #   python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ...
        # Multi nodes
        #   python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 0 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
        #   python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 1 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
        # args.rank = int(os.environ.get('OMPI_COMM_WORLD_RANK'))
        # local_world_size = int(os.environ['GPU_PER_NODE_COUNT'])
        # args.world_size = args.world_size * local_world_size
        # args.gpu = args.local_rank = int(os.environ['LOCAL_RANK'])
        # args.rank = args.rank * local_world_size + args.local_rank
        print(
            "world size: {}, rank: {}, local rank: {}".format(
                args.world_size, args.rank, args.local_rank
            )
        )
        print(json.dumps(dict(os.environ), indent=2))
    elif "SLURM_PROCID" in os.environ:
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.local_rank = int(os.environ["SLURM_LOCALID"])
        args.world_size = int(os.environ["SLURM_NPROCS"])

        print(
            "world size: {}, world rank: {}, local rank: {}, device_count: {}".format(
                args.world_size, args.rank, args.local_rank, torch.cuda.device_count()
            )
        )
    else:
        print("Not using distributed mode")
        args.distributed = False
        args.world_size = 1
        args.rank = 0
        args.local_rank = 0
        return

    print("world_size:{} rank:{} local_rank:{}".format(args.world_size, args.rank, args.local_rank))
    args.distributed = True
    torch.cuda.set_device(args.local_rank)
    args.dist_backend = "nccl"
    print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)

    torch.distributed.init_process_group(
        backend=args.dist_backend,
        world_size=args.world_size,
        rank=args.rank,
        init_method=args.dist_url,
    )

    print("Before torch.distributed.barrier()")
    torch.distributed.barrier()
    print("End torch.distributed.barrier()")
    setup_for_distributed(args.rank == 0)


@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    if target.numel() == 0:
        return [torch.zeros([], device=output.device)]
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


@torch.no_grad()
def accuracy_onehot(pred, gt):
    """_summary_

    Args:
        pred (_type_): n, c
        gt (_type_): n, c
    """
    tp = ((pred - gt).abs().sum(-1) < 1e-4).float().sum()
    acc = tp / gt.shape[0] * 100
    return acc


def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, and this
    class can go away.
    """
    if __torchvision_need_compat_flag < 0.7:
        if input.numel() > 0:
            return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)

        output_shape = _output_size(2, input, size, scale_factor)
        output_shape = list(input.shape[:-2]) + list(output_shape)
        return _new_empty_tensor(input, output_shape)
    else:
        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)


class color_sys:
    def __init__(self, num_colors) -> None:
        self.num_colors = num_colors
        colors = []
        for i in np.arange(0.0, 360.0, 360.0 / num_colors):
            hue = i / 360.0
            lightness = (50 + np.random.rand() * 10) / 100.0
            saturation = (90 + np.random.rand() * 10) / 100.0
            colors.append(
                tuple([int(j * 255) for j in colorsys.hls_to_rgb(hue, lightness, saturation)])
            )
        self.colors = colors

    def __call__(self, idx):
        return self.colors[idx]


def inverse_sigmoid(x, eps=1e-3):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)


def clean_state_dict(state_dict):
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if k[:7] == "module.":
            k = k[7:]  # remove `module.`
        new_state_dict[k] = v
    return new_state_dict
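
As a quick illustration, here is a minimal sketch of two of the helpers above in use; it assumes the definitions above are in scope, and the checkpoint keys are invented for the example:

import torch
from collections import OrderedDict

# clean_state_dict strips the "module." prefix that nn.DataParallel adds,
# so a multi-GPU checkpoint can be loaded into a plain single-GPU model.
ckpt = OrderedDict({"module.backbone.weight": torch.zeros(3),
                    "module.head.bias": torch.zeros(1)})
print(list(clean_state_dict(ckpt).keys()))  # ['backbone.weight', 'head.bias']

# inverse_sigmoid maps probabilities back to logits, clamping away from
# 0 and 1 so the log stays finite (commonly used for iterative box
# refinement in DETR-style detectors).
p = torch.tensor([0.0, 0.5, 1.0])
print(inverse_sigmoid(p))  # roughly [-6.91, 0.00, 6.91] with eps=1e-3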
spaces/Chenyuwen/playground/app.py
DELETED
@@ -1,230 +0,0 @@
import streamlit as st
import requests
from enum import Enum


st.header("WeLM Demo 初体验")
st.text('Tips: ')
st.text("* WeLM不是一个直接的对话机器人,而是一个补全用户输入信息的生成模型")
st.text("* 修改Prompt可以更多参考 https://welm.weixin.qq.com/docs/introduction/")
st.text("* 你的输入可能会被我们拼接在预设的prompt尾部后再发送给API")
st.text("* 在每个任务的下方我们展示了该任务请求API时完整的参数(包含完整的prompt)")


class Task(str, Enum):
    DIALOG_JOURNAL = "对话(Elon musk)"
    QA = "问答"
    COPY = "文案生成"
    REWRITE = "文本改写"
    READING_COMPREHENSION = "阅读理解"
    TRANSLATE = "翻译"
    COMPLETION = "文章续写"
    FREE = "自由任务"


task_value2type = {v.value: v.name for v in Task}

task_type = st.selectbox(
    "任务示例",
    [v.value for v in Task]
)
task_type = task_value2type[task_type]

task2prompt_pre = {
    Task.READING_COMPREHENSION: """请阅读文章后根据文章内容给出问题的答案。
文章:中国空间技术研究院(China Academy of Space Technology,简称CAST)隶属于中国航天科技集团公司,是中国空间技术的主要研究中心和航天器研制、生产基地,成立于1968年2月20日。下设10个研究所和一个工厂。现任院长为杨保华,院党委书记为李开民。1970年4月24日,中国空间技术研究院成功研制并发射了中国首颗人造地球卫星东方红一号。2003年10月,神舟五号载人飞船载人飞行取得成功。2005年,神舟六号载人飞船实现多人多天的太空飞行。截至2005年,中国空间技术研究院研制并成功发射了68颗不同类型的人造卫星、4艘无人试验飞船和2艘载人飞船,涵盖通信广播卫星、返回式遥感卫星、地球资源卫星、气象卫星、科学探测与技术试验卫星、导航定位卫星和载人航天器等领域。
问题:中国空间技术研究院在哪年成立?
答案:1968年
""",
    Task.QA: """请根据所学知识回答问题
问题:百年孤独的作者是?
回答:作者是哥伦比亚作家加西亚·马尔克斯,这本书是其代表作,也是拉丁美洲魔幻现实主义文学的代表作,被誉为“再现拉丁美洲历史社会图景的鸿篇巨著”。
问题:世界第二高峰是?
回答:乔戈里峰。海拔8611,海拔仅次于珠穆朗玛峰。“乔戈里”,通常被认为是塔吉克语,意思是“高大雄伟”。乔戈里山峰主要有6条山脊,西北—东南山脊为喀喇昆山脉主脊线,同时也是中国、巴基斯坦的国境线。
""",
    Task.COPY: """请根据商品描述生成商品的文案
商品描述:芍药香氛的沐浴乳
文案:无比高大上的香味,淡粉色玫瑰清新诱人!沐浴后都充满着浪漫和幸福感,这样的情调会让你变得更加温柔。
商品描述:清爽去痘的洗面奶
文案:蕴含海藻精华,配合多种草本植物。能清洁毛孔污垢,保湿滋润肌肤,让细胞更健康、平衡水油分泌,消除痘痘同时预防痘痘产生,预防黑头粉刺及暗疮形成。""",
    Task.TRANSLATE: """“I had a good time”的中文翻译是:我玩得很开心。
“The pandemic has triggered more demand for online shopping”的中文翻译是:疫情引发了更多的网购需求
""",
    Task.DIALOG_JOURNAL: """以下是与Elon Musk的一系列对话。很多人都知道,Elon Musk是特斯拉、SpaceX、Neuralink和Boring公司的领导人。可能鲜为人知的是,他是一位世界级的工程师和设计师,不断强调第一原理的思考,并承担了许多在他之前的人认为不可能的大工程问题。Elon Musk 在2022年4月达成了一项有争议的交易,以440亿美元收购了twitter。经过无数次商业和法律上的对抗,收购于10月27日正式完成。收购后,马斯克立即解雇了公司的大部分员工,包括Twitter的高层管理人员。
Elon Musk:你好,你想讨论什么?
提问者1:你为什么要收购Twitter?
Elon Musk:Twitter已经是城市广场,影响力惊人。我相信我可以让它变得更好。这将使Twitter走上经济上可持续发展的道路,有助于保护文明的未来。我还为Twitter提出了许多令人兴奋的新的和有用的功能。这将会是一个很大的乐趣。
Elon Musk:你好,你想讨论什么?
提问者2:你是谁?
Elon Musk。我是Elon Musk,一个众所周知的科技企业家和许多公司的首席执行官。你呢,你是做什么的?
Elon Musk: 你好,你想讨论什么?
提问者3:你和neuralink的一位执行官生了双胞胎吗?
Elon Musk:我是在尽我所能帮助人口不足的危机。出生率的崩溃是迄今为止人类文明面临的最大危险。
提问者3: 你真的是Elon Musk吗?
Elon Musk: 是的! 我现在在德克萨斯州,在我们的特斯拉总部呆了一段时间。刚从加州回来,我们正在为SpaceX的发射做准备,明天即将发表另一个TED演讲,这次是关于Neuralink。
Elon Musk: 你好,你想讨论什么?
提问者4:你开的是什么类型的车?
Elon Musk: 特斯拉Model S最多。时常开一辆Model X。
提问者4:你多大了?
Elon Musk: 51岁,但我说我有40岁人的精力。在健康方面最重要的事情不仅仅是避免摄入过多的糖,还有高强度的运动。
""",

    Task.REWRITE: """有这样一段文本,{医生微笑着递给小明棒棒糖,同时让小明服下了药。}
改写这段话让它变得更加惊悚。{医生眼露凶光让小明服药,小明感到非常害怕}。
有这样一段文本,{雨下得很大}
改写这段话让它变得更加具体。{一霎时,雨点连成了线,大雨就像天塌了似的铺天盖地从空中倾泻下来。}。
有这样一段文本,{王老师离开了电影院,外面已经天黑了}
改写这段话让它包含更多电影信息。{这部电影比小王预想的时间要长,虽然口碑很好,但离开电影院时,小王还是有些失望。}
有这样一段文本,{男人站在超市外面打电话}
改写这段话来描述小丑。{男人站在马戏团外一边拿着气球一边打电话}
有这样一段文本,{风铃声响起}
改写这段话写的更加丰富。{我对这个风铃的感情是由它的铃声引起的。每当风吹来时,风铃发出非常动听的声音,听起来是那么乐观、豁达,像一个小女孩格格的笑声。}
""",
    Task.COMPLETION: """
""",
    Task.FREE: ""
}

task2prompt_end = {
    Task.READING_COMPREHENSION: """文章:“经审理查明,被告人张××、杜×、杨2某均为辽宁省辽阳第一监狱五监区服刑人员。2015年11月3日13时许,被告人张××、杜×因无事便跟随去催要生产材料的被告人杨2某一同前往六监区,在六监区生产车间门外,被告人杨2某与六监区送料员于×因送料问题发生争执,被告人杨2某上前拽住被害人于×胳膊并用手击打被害人后脖颈两下,被告人张××、杜×见杨2某动手后,先后上前分别对被害人于×面部、头部及腹部进行殴打,后被赶到的干警制止。被害人于×被打造成面部受伤,鼻子流血,当日下午14时许,到监区内医院就诊,诊断为:鼻部中段向左侧畸形,11月5日经监狱医院X光诊断为鼻骨骨折。2015年11月18日,经辽阳襄平法医司法鉴定所法医鉴定:被害人于×身体损伤程度为轻伤二级。被告人张××、杜×、杨2某共同赔偿被害人于×人民币7000元,被害人于×对被告人的行为表示谅解。”
问题: “被害人于×11月5日经监狱医院X光诊断后的诊断结果为?”
答案:""",
    Task.COPY: """商品描述:冬季百搭的外套
文案:""",
    Task.QA: """问题:四大名著分别是?
回答:""",
    Task.TRANSLATE: """“I am a programmer in Tencent”的中文翻译是:""",
    Task.DIALOG_JOURNAL: """Elon Musk: 你好,你想讨论什么?
我:收购Twitter之后你想做什么?
Elon Musk:""",
    Task.REWRITE: """有这样一段文本,{我想家了}
改写这段话包含更多悲伤的感情。{""",
    Task.COMPLETION: """“八月十八潮,壮观天下无。”这是北宋大诗人苏东坡咏赞钱塘秋潮的千古名句。千百年来,钱塘江以其奇特卓绝的江潮,不知倾倒了多少游人看客。
每年的农历八月十八前后,是观潮的最佳时节。这期间,秋阳朗照,金风宜人,钱塘江口的海塘上,游客群集,兴致盎然,争睹奇景。""",
    Task.FREE: ""
}

prompt_fix = task2prompt_pre[Task[task_type]]
prompt_user = task2prompt_end[Task[task_type]]

user_input = st.text_area('你的输入(最终完整输入请见下方 API 请求内容)', value=prompt_user, height=180)
all_input = prompt_fix + user_input
all_input = all_input.rstrip('\\n')


with st.expander("配置"):
    stop_tokens = ""

    def cut_message(answer: str):
        end = []
        for etk in stop_tokens:
            offset = answer.find(etk)
            if offset > 0:
                end.append(offset)
        if len(end) > 0:
            answer = answer[:min(end)]
        return answer.rstrip()

    if task_type == 'READING_COMPREHENSION':
        default_top_p, default_temperature, default_n, default_tokens = 0.0, 0.0, 1, 15
    elif task_type == 'TRANSLATE':
        default_top_p, default_temperature, default_n, default_tokens = 0.0, 0.0, 1, 60
    elif task_type == 'COMPLETION':
        default_top_p, default_temperature, default_n, default_tokens = 0.95, 0.85, 1, 150
    else:
        default_top_p, default_temperature, default_n, default_tokens = 0.95, 0.85, 3, 64

    model = st.selectbox("model", ["medium", "large", "xl"], index=2)
    top_p = st.slider('top p', 0.0, 1.0, default_top_p)
    top_k = st.slider('top k', 0, 100, 0)
    temperature = st.slider('temperature', 0.0, 1.0, default_temperature)
    n = st.slider('n', 1, 5, default_n)
    max_tokens = st.slider('max tokens', 4, 512, default_tokens)

    if st.checkbox("使用换行符作为截断", value=True):
        stop_tokens = "\n"


def completion():
    try:
        resp = requests.post("https://welm.weixin.qq.com/v1/completions", json={
            'prompt': all_input,
            'max_tokens': max_tokens,
            'temperature': temperature,
            'top_p': top_p,
            'top_k': top_k,
            'n': n,
            'model': model,
            "stop": stop_tokens,
        }, headers={"Authorization": f"Bearer {st.secrets['token']}"})
        answer = resp.json()
        for idx, choice in enumerate(answer['choices']):
            if choice.get("finish_reason", None) != "finished":
                st.error(f'生成结果#{idx}出错: {choice["finish_reason"]}')
            elif choice.get("text", None) is None:
                st.error(f'生成结果#{idx}出错: internal error')
            else:
                text = choice.get("text", "")
                text = cut_message(text)
                if len(text) == 0:
                    st.info(f'生成结果#{idx}: 结果为空,可能的原因:生成的第一个字符为stop字符,请合理配置prompt或stop')
                else:
                    st.success(f'生成结果#{idx}: {text}')

        if task_type == 'COMPLETION':
            st.text('Tips: 可多次生成后复制你认为的最好结果拼接于原文后,让WeLM继续生成。')

    except Exception as e:
        st.error(f"生成结果出错:{str(e)}")


code_str = """
post_json = {{
    'prompt': '{all_input}',
    'model': '{model}',
    'max_tokens': {max_tokens},
    'temperature': {temperature},
    'top_p': {top_p},
    'top_k': {top_k},
    'n': {n},
    "stop": '{stop_tokens}',
}}
""".format(all_input=all_input, model=model, max_tokens=max_tokens, temperature=temperature, top_p=top_p, top_k=top_k, n=n, stop_tokens=stop_tokens)
st.code(code_str)

if st.button('立即生成'):
    completion()


footer = """<style>
a:link , a:visited{
background-color: transparent;
text-decoration: none;
color: rgb(7 6 17);
}
a:hover, a:active {
background-color: transparent;
text-decoration: underline;
}
.footer {
position: relative;
left: 0;
bottom: 0;
width: 100%;
background-color: white;
color: rgb(18 19 23 / 80%);
text-align: left;
}
</style>
<div class="footer" style="font-size:12px">
<br>
<br>
<br>
<br>
<br>
<p style="font-size:12px">声明(受<a href="https://beta.openai.com/docs/usage-guidelines/content-policy" target="_blank"> OpenAI GPT3's Content Policy </a>启发)</p>
<p style="font-size:12px">我们禁止用户在知情的情况下产生或利用他人在知情的情况下产生有害内容,包括且不限于仇恨言论、骚扰信息、自我伤害内容、性内容、政治内容、垃圾邮件、诈骗信息等。<br>
特别提示:本网页上产生的所有内容不代表本平台的观点和意见。<br>
欲了解更多信息,请阅读我们的<a href="https://welm.weixin.qq.com/docs/license/" target="_blank">《服务使用协议》和《免责声明》</a>。</p>
</div>
"""
st.markdown(footer, unsafe_allow_html=True)
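
For reference, a minimal command-line sketch of the same WeLM call the app above makes. The endpoint, payload fields, and response shape ("choices" with "text") are taken from the app code itself; YOUR_WELM_TOKEN is a placeholder to replace with a real API key, and the prompt is one of the app's own QA prompts:

import requests

resp = requests.post(
    "https://welm.weixin.qq.com/v1/completions",
    json={
        "prompt": "问题:四大名著分别是?\n回答:",
        "model": "xl",
        "max_tokens": 64,
        "temperature": 0.85,
        "top_p": 0.95,
        "top_k": 0,
        "n": 1,
        "stop": "\n",
    },
    headers={"Authorization": "Bearer YOUR_WELM_TOKEN"},  # hypothetical token
)
for choice in resp.json().get("choices", []):
    print(choice.get("text", ""))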
spaces/CjangCjengh/Sanskrit-TTS/mel_processing.py
DELETED
@@ -1,101 +0,0 @@
import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn

MAX_WAV_VALUE = 32768.0


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """
    PARAMS
    ------
    C: compression factor
    """
    return torch.log(torch.clamp(x, min=clip_val) * C)


def dynamic_range_decompression_torch(x, C=1):
    """
    PARAMS
    ------
    C: compression factor used to compress
    """
    return torch.exp(x) / C


def spectral_normalize_torch(magnitudes):
    output = dynamic_range_compression_torch(magnitudes)
    return output


def spectral_de_normalize_torch(magnitudes):
    output = dynamic_range_decompression_torch(magnitudes)
    return output


mel_basis = {}
hann_window = {}


def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
    if torch.min(y) < -1.:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.:
        print('max value is ', torch.max(y))

    global hann_window
    dtype_device = str(y.dtype) + '_' + str(y.device)
    wnsize_dtype_device = str(win_size) + '_' + dtype_device
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
    y = y.squeeze(1)

    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
    return spec


def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
    global mel_basis
    dtype_device = str(spec.dtype) + '_' + str(spec.device)
    fmax_dtype_device = str(fmax) + '_' + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)
    return spec


def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
    if torch.min(y) < -1.:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.:
        print('max value is ', torch.max(y))

    global mel_basis, hann_window
    dtype_device = str(y.dtype) + '_' + str(y.device)
    fmax_dtype_device = str(fmax) + '_' + dtype_device
    wnsize_dtype_device = str(win_size) + '_' + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
    y = y.squeeze(1)

    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
                      center=center, pad_mode='reflect', normalized=False, onesided=True)

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)

    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)

    return spec
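
A quick sketch of driving mel_spectrogram_torch above. The STFT parameters are common VITS-style defaults, not values pinned down by this file, and the sketch assumes the older torch/librosa APIs this module was written against (librosa.filters.mel with positional arguments, torch.stft without return_complex):

import torch

# one second of fake audio in roughly [-1, 1], batch size 1
y = torch.rand(1, 22050) * 2 - 1
mel = mel_spectrogram_torch(
    y, n_fft=1024, num_mels=80, sampling_rate=22050,
    hop_size=256, win_size=1024, fmin=0.0, fmax=None,
)
print(mel.shape)  # (batch, num_mels, frames), here torch.Size([1, 80, 86])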
spaces/CognitiveLabs/GPT-4-Vision-Chat/langsmith_config.py
DELETED
@@ -1,8 +0,0 @@
import os

def setup_langsmith_config():
    os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"  # Update with your API URL if using a hosted instance of Langsmith.
    os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")  # Update with your API key
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    project_name = "GPT-4-VISION-DEMO"  # Update with your project name
    os.environ["LANGCHAIN_PROJECT"] = project_name  # Optional: "default" is used if not set
-
os.environ["LANGCHAIN_PROJECT"] = project_name # Optional: "default" is used if not set
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CompVis/stable-diffusion-license/app.py
DELETED
@@ -1,14 +0,0 @@
import streamlit as st

txt_link = "https://huggingface.co/spaces/CompVis/stable-diffusion-license/raw/main/license.txt"
html_link = "https://huggingface.co/spaces/CompVis/stable-diffusion-license/raw/main/license.html"

st.sidebar.title("Stable Diffusion")
st.sidebar.markdown("## Stable Diffusion RAIL License v1.0")
st.sidebar.markdown(f"This is the home of the Stable Diffusion RAIL License v1.0.\
If you would like to download the license you can get it as [.txt]({txt_link}), or [.html]({html_link}) file.")

with open("license.txt", "r") as f:
    license_html = f.read()

st.markdown(license_html, unsafe_allow_html=True)
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_core/_subprocesses.py
DELETED
@@ -1,135 +0,0 @@
-from __future__ import annotations
-
-from io import BytesIO
-from os import PathLike
-from subprocess import DEVNULL, PIPE, CalledProcessError, CompletedProcess
-from typing import (
-    IO,
-    Any,
-    AsyncIterable,
-    Mapping,
-    Sequence,
-    cast,
-)
-
-from ..abc import Process
-from ._eventloop import get_asynclib
-from ._tasks import create_task_group
-
-
-async def run_process(
-    command: str | bytes | Sequence[str | bytes],
-    *,
-    input: bytes | None = None,
-    stdout: int | IO[Any] | None = PIPE,
-    stderr: int | IO[Any] | None = PIPE,
-    check: bool = True,
-    cwd: str | bytes | PathLike[str] | None = None,
-    env: Mapping[str, str] | None = None,
-    start_new_session: bool = False,
-) -> CompletedProcess[bytes]:
-    """
-    Run an external command in a subprocess and wait until it completes.
-
-    .. seealso:: :func:`subprocess.run`
-
-    :param command: either a string to pass to the shell, or an iterable of strings containing the
-        executable name or path and its arguments
-    :param input: bytes passed to the standard input of the subprocess
-    :param stdout: either :data:`subprocess.PIPE` or :data:`subprocess.DEVNULL`
-    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL` or
-        :data:`subprocess.STDOUT`
-    :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the process
-        terminates with a return code other than 0
-    :param cwd: If not ``None``, change the working directory to this before running the command
-    :param env: if not ``None``, this mapping replaces the inherited environment variables from the
-        parent process
-    :param start_new_session: if ``true`` the setsid() system call will be made in the child
-        process prior to the execution of the subprocess. (POSIX only)
-    :return: an object representing the completed process
-    :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process exits with a
-        nonzero return code
-
-    """
-
-    async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
-        buffer = BytesIO()
-        async for chunk in stream:
-            buffer.write(chunk)
-
-        stream_contents[index] = buffer.getvalue()
-
-    async with await open_process(
-        command,
-        stdin=PIPE if input else DEVNULL,
-        stdout=stdout,
-        stderr=stderr,
-        cwd=cwd,
-        env=env,
-        start_new_session=start_new_session,
-    ) as process:
-        stream_contents: list[bytes | None] = [None, None]
-        try:
-            async with create_task_group() as tg:
-                if process.stdout:
-                    tg.start_soon(drain_stream, process.stdout, 0)
-                if process.stderr:
-                    tg.start_soon(drain_stream, process.stderr, 1)
-                if process.stdin and input:
-                    await process.stdin.send(input)
-                    await process.stdin.aclose()
-
-                await process.wait()
-        except BaseException:
-            process.kill()
-            raise
-
-    output, errors = stream_contents
-    if check and process.returncode != 0:
-        raise CalledProcessError(cast(int, process.returncode), command, output, errors)
-
-    return CompletedProcess(command, cast(int, process.returncode), output, errors)
-
-
-async def open_process(
-    command: str | bytes | Sequence[str | bytes],
-    *,
-    stdin: int | IO[Any] | None = PIPE,
-    stdout: int | IO[Any] | None = PIPE,
-    stderr: int | IO[Any] | None = PIPE,
-    cwd: str | bytes | PathLike[str] | None = None,
-    env: Mapping[str, str] | None = None,
-    start_new_session: bool = False,
-) -> Process:
-    """
-    Start an external command in a subprocess.
-
-    .. seealso:: :class:`subprocess.Popen`
-
-    :param command: either a string to pass to the shell, or an iterable of strings containing the
-        executable name or path and its arguments
-    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
-        file-like object, or ``None``
-    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
-        a file-like object, or ``None``
-    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
-        :data:`subprocess.STDOUT`, a file-like object, or ``None``
-    :param cwd: If not ``None``, the working directory is changed before executing
-    :param env: If env is not ``None``, it must be a mapping that defines the environment
-        variables for the new process
-    :param start_new_session: if ``true`` the setsid() system call will be made in the child
-        process prior to the execution of the subprocess. (POSIX only)
-    :return: an asynchronous process object
-
-    """
-    shell = isinstance(command, str)
-    return await get_asynclib().open_process(
-        command,
-        shell=shell,
-        stdin=stdin,
-        stdout=stdout,
-        stderr=stderr,
-        cwd=cwd,
-        env=env,
-        start_new_session=start_new_session,
-    )
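
The removed file is anyio's vendored run_process/open_process implementation, which drains stdout and stderr concurrently in a task group and mirrors subprocess.run semantics, including raising CalledProcessError on a non-zero exit when check=True. A minimal usage sketch of the public API; anyio exposes run_process in its top-level namespace, and the echo command assumes a POSIX environment:

    import anyio

    async def main() -> None:
        # check defaults to True, so a non-zero exit raises CalledProcessError.
        result = await anyio.run_process(["echo", "hello"])
        print(result.stdout.decode().strip())  # prints "hello"

    anyio.run(main)
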
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-f2292b12.css
DELETED
@@ -1 +0,0 @@
-[single minified line: the Gradio 3.37.0 base stylesheet scoped under .gradio-container-3-37-0 — a Tailwind-style CSS reset, cropper.js widget styles, :root design-token custom properties (type scale, spacing, colors, shadows, easings), .prose typography rules, and Svelte-scoped app/loader/error styles; vendored build output elided here]
spaces/DaFujaTyping/hf-Chat-ui/src/hooks.server.ts
DELETED
@@ -1,72 +0,0 @@
-import { dev } from "$app/environment";
-import { COOKIE_NAME } from "$env/static/private";
-import type { Handle } from "@sveltejs/kit";
-import {
-  PUBLIC_GOOGLE_ANALYTICS_ID,
-  PUBLIC_DEPRECATED_GOOGLE_ANALYTICS_ID,
-} from "$env/static/public";
-import { addYears } from "date-fns";
-import { collections } from "$lib/server/database";
-import { base } from "$app/paths";
-
-export const handle: Handle = async ({ event, resolve }) => {
-  const token = event.cookies.get(COOKIE_NAME);
-
-  event.locals.sessionId = token || crypto.randomUUID();
-
-  if (
-    event.request.method === "POST" &&
-    !event.url.pathname.startsWith(`${base}/settings`) &&
-    !event.url.pathname.startsWith(`${base}/admin`)
-  ) {
-    const hasAcceptedEthicsModal = await collections.settings.countDocuments({
-      sessionId: event.locals.sessionId,
-      ethicsModalAcceptedAt: { $exists: true },
-    });
-
-    if (!hasAcceptedEthicsModal) {
-      const sendJson =
-        event.request.headers.get("accept")?.includes("application/json") ||
-        event.request.headers.get("content-type")?.includes("application/json");
-      return new Response(
-        sendJson
-          ? JSON.stringify({ error: "You need to accept the welcome modal first" })
-          : "You need to accept the welcome modal first",
-        {
-          status: 405,
-          headers: {
-            "content-type": sendJson ? "application/json" : "text/plain",
-          },
-        }
-      );
-    }
-  }
-
-  // Refresh cookie expiration date
-  event.cookies.set(COOKIE_NAME, event.locals.sessionId, {
-    path: "/",
-    // So that it works inside the space's iframe
-    sameSite: dev ? "lax" : "none",
-    secure: !dev,
-    httpOnly: true,
-    expires: addYears(new Date(), 1),
-  });
-
-  let replaced = false;
-
-  const response = await resolve(event, {
-    transformPageChunk: (chunk) => {
-      // For some reason, Sveltekit doesn't let us load env variables from .env in the app.html template
-      if (replaced || !chunk.html.includes("%gaId%") || !chunk.html.includes("%gaIdDeprecated%")) {
-        return chunk.html;
-      }
-      replaced = true;
-
-      return chunk.html
-        .replace("%gaId%", PUBLIC_GOOGLE_ANALYTICS_ID)
-        .replace("%gaIdDeprecated%", PUBLIC_DEPRECATED_GOOGLE_ANALYTICS_ID);
-    },
-  });
-
-  return response;
-};
spaces/Daniton/MagicPrompt-Stable-Diffusion/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: MagicPrompt Stable Diffusion
-emoji: 🍄
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: phenomenon1981/MagicPrompt-Stable-Diffusion
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DataScienceEngineering/6-TreemapAndSunburst/app.py
DELETED
@@ -1,230 +0,0 @@
-import streamlit as st
-import numpy as np
-import plotly.express as px
-import pandas as pd
-import plotly.graph_objects as go
-
-st.set_page_config(page_title="Plotly Graphing Libraries",layout='wide')
-
-import streamlit as st
-
-uploaded_files = st.file_uploader("Choose a CSV file", accept_multiple_files=True)
-for uploaded_file in uploaded_files:
-    bytes_data = uploaded_file.read()
-    st.write("filename:", uploaded_file.name)
-    st.write(bytes_data)
-
-    if st.checkbox("FileDetails"):
-
-        filevalue = uploaded_file.getvalue()
-        st.write(filevalue)
-        st.write(uploaded_file.name)
-        st.write(uploaded_file.type)
-        st.write(uploaded_file.size)
-        #st.write(uploaded_file.last_modified)
-        #st.write(uploaded_file.charset)
-        st.write(uploaded_file.getbuffer())
-        st.write(uploaded_file.getbuffer().nbytes)
-        st.write(uploaded_file.getbuffer().tobytes())
-        st.write(uploaded_file.getbuffer().tolist())
-        st.write(uploaded_file.getbuffer().itemsize)
-        st.write(uploaded_file.getbuffer().ndim)
-        st.write(uploaded_file.getbuffer().shape)
-        st.write(uploaded_file.getbuffer().strides)
-        st.write(uploaded_file.getbuffer().suboffsets)
-        st.write(uploaded_file.getbuffer().readonly)
-        st.write(uploaded_file.getbuffer().c_contiguous)
-        st.write(uploaded_file.getbuffer().f_contiguous)
-        st.write(uploaded_file.getbuffer().contiguous)
-        st.write(uploaded_file.getbuffer().itemsize)
-        st.write(uploaded_file.getbuffer().nbytes)
-        st.write(uploaded_file.getbuffer().ndim)
-        st.write(uploaded_file.getbuffer().shape)
-        st.write(uploaded_file.getbuffer().strides)
-        st.write(uploaded_file.getbuffer().suboffsets)
-        st.write(uploaded_file.getbuffer().readonly)
-        st.write(uploaded_file.getbuffer().c_contiguous)
-        st.write(uploaded_file.getbuffer().f_contiguous)
-        st.write(uploaded_file.getbuffer().contiguous)
-        st.write(uploaded_file.getbuffer().itemsize)
-        st.write(uploaded_file.getbuffer().nbytes)
-        st.write(uploaded_file.getbuffer().ndim)
-        st.write(uploaded_file.getbuffer().shape)
-        st.write(uploaded_file.getbuffer().strides)
-        st.write(uploaded_file.getbuffer().suboffsets)
-        st.write(uploaded_file.getbuffer().readonly)
-        st.write(uploaded_file.getbuffer().c_contiguous)
-        st.write(uploaded_file.getbuffer().f_contiguous)
-        myDF = pd.DataFrame(uploaded_file.getbuffer().tolist())
-
-
-st.markdown("# Treemaps from upload data file: https://plotly.com/python/treemaps/")
-#df = myDF.query("year == 2007")
-df = myDF
-fig = px.treemap(df, path=[px.Constant("time"), 'message', 'name'], values='content',
-                 color='lifeExp', hover_data=['iso_alpha'],
-                 color_continuous_scale='RdBu',
-                 color_continuous_midpoint=np.average(df['name'], weights=df['content'])) # todo - debug this and get it working with the data
-fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
-#fig.show()
-st.plotly_chart(fig, use_container_width=True)
-
-
-
-
-#show replace
-if st.checkbox("replace"):
-    mydf = st.dataframe(df)
-    columns = st.selectbox("Select column", df.columns)
-    old_values = st.multiselect("Current Values",list(df[columns].unique()),list(df[columns].unique()))
-    with st.form(key='my_form'):
-        col1,col2 = st.beta_columns(2)
-        st_input = st.number_input if is_numeric_dtype(df[columns]) else st.text_input
-        with col1:
-            old_val = st_input("old value")
-        with col2:
-            new_val = st_input("new value")
-        if st.form_submit_button("Replace"):
-            df[columns]=df[columns].replace(old_val,new_val)
-            st.success("{} replace with {} successfully ".format(old_val,new_val))
-            excel = df.to_excel(r"F:\book2.xlsx", index = False, header=True,encoding="utf-8")
-            df =pd.read_excel(r"F:\book2.xlsx")
-            mydf.add_rows(df)
-
-st.markdown("WebGL Rendering with 1,000,000 Points")
-import plotly.graph_objects as go
-import numpy as np
-N = 1000000
-fig = go.Figure()
-fig.add_trace(
-    go.Scattergl(
-        x = np.random.randn(N),
-        y = np.random.randn(N),
-        mode = 'markers',
-        marker = dict(
-            line = dict(
-                width = 1,
-                color = 'DarkSlateGrey')
-        )
-    )
-)
-#fig.show()
-st.plotly_chart(fig, use_container_width=True)
-
-
-
-st.markdown("# WebGL Graph - ScatterGL")
-fig = go.Figure()
-trace_num = 10
-point_num = 5000
-for i in range(trace_num):
-    fig.add_trace(
-        go.Scattergl(
-            x = np.linspace(0, 1, point_num),
-            y = np.random.randn(point_num)+(i*5)
-        )
-    )
-fig.update_layout(showlegend=False)
-#fig.show()
-st.plotly_chart(fig, use_container_width=True)
-
-
-st.markdown("# Treemaps: https://plotly.com/python/treemaps/")
-df = px.data.gapminder().query("year == 2007")
-fig = px.treemap(df, path=[px.Constant("world"), 'continent', 'country'], values='pop',
-                 color='lifeExp', hover_data=['iso_alpha'],
-                 color_continuous_scale='RdBu',
-                 color_continuous_midpoint=np.average(df['lifeExp'], weights=df['pop']))
-fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
-#fig.show()
-st.plotly_chart(fig, use_container_width=True)
-
-
-st.markdown("# Sunburst: https://plotly.com/python/sunburst-charts/")
-
-
-st.markdown("# Life Expectancy Sunburst")
-df = px.data.gapminder().query("year == 2007")
-fig = px.sunburst(df, path=['continent', 'country'], values='pop',
-                  color='lifeExp', hover_data=['iso_alpha'],
-                  color_continuous_scale='RdBu',
-                  color_continuous_midpoint=np.average(df['lifeExp'], weights=df['pop']))
-st.plotly_chart(fig, use_container_width=True)
-
-
-st.markdown("# Coffee Aromas and Tastes Sunburst")
-df1 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/sunburst-coffee-flavors-complete.csv')
-df2 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/coffee-flavors.csv')
-fig = go.Figure()
-fig.add_trace(go.Sunburst(
-    ids=df1.ids,
-    labels=df1.labels,
-    parents=df1.parents,
-    domain=dict(column=0)
-))
-fig.add_trace(go.Sunburst(
-    ids=df2.ids,
-    labels=df2.labels,
-    parents=df2.parents,
-    domain=dict(column=1),
-    maxdepth=2
-))
-fig.update_layout(
-    grid= dict(columns=2, rows=1),
-    margin = dict(t=0, l=0, r=0, b=0)
-)
-st.plotly_chart(fig, use_container_width=True)
-
-
-
-
-
-# Sunburst
-#data = dict(
-#    character=["Eve", "Cain", "Seth", "Enos", "Noam", "Abel", "Awan", "Enoch", "Azura"],
-#    parent=["", "Eve", "Eve", "Seth", "Seth", "Eve", "Eve", "Awan", "Eve" ],
-#    value=[10, 14, 12, 10, 2, 6, 6, 4, 4])
-#fig = px.sunburst(
-#    data,
-#    names='character',
-#    parents='parent',
-#    values='value',
-#)
-#fig.show()
-#st.plotly_chart(fig, use_container_width=True)
-
-
-df = px.data.tips()
-fig = px.treemap(df, path=[px.Constant("all"), 'sex', 'day', 'time'],
-                 values='total_bill', color='time',
-                 color_discrete_map={'(?)':'lightgrey', 'Lunch':'gold', 'Dinner':'darkblue'})
-fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
-#fig.show()
-fig.update_traces(marker=dict(cornerradius=5))
-
-st.plotly_chart(fig, use_container_width=True)
-
-
-df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/96c0bd/sunburst-coffee-flavors-complete.csv')
-fig = go.Figure(go.Treemap(
-    ids = df.ids,
-    labels = df.labels,
-    parents = df.parents,
-    pathbar_textfont_size=15,
-    root_color="lightgrey"
-))
-fig.update_layout(
-    uniformtext=dict(minsize=10, mode='hide'),
-    margin = dict(t=50, l=25, r=25, b=25)
-)
-#fig.show()
-st.plotly_chart(fig, use_container_width=True)
-
-
-df = pd.read_pickle('bloom_dataset.pkl')
-fig = px.treemap(df, path=[px.Constant("ROOTS"), 'Macroarea', 'Family', 'Genus', 'Language', 'dataset_name'],
-                 values='num_bytes', maxdepth=4)
-fig.update_traces(root_color="pink")
-fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
-
-st.plotly_chart(fig, use_container_width=True)
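
The deleted demo carries a few latent bugs worth noting: myDF is built from a raw byte buffer rather than a parsed CSV, the first treemap references gapminder columns ('lifeExp', 'iso_alpha') that uploaded files will not have (the inline todo admits as much), is_numeric_dtype is used without an import from pandas.api.types, and st.beta_columns was removed in later Streamlit releases in favor of st.columns. A minimal working sketch of the upload-then-treemap idea; the column names are hypothetical placeholders for whatever hierarchy an uploaded CSV actually contains:

    import pandas as pd
    import plotly.express as px
    import streamlit as st

    uploaded = st.file_uploader("Choose a CSV file")
    if uploaded is not None:
        df = pd.read_csv(uploaded)  # parse the upload instead of dumping raw bytes
        # "category", "subcategory", and "amount" are illustrative column names.
        fig = px.treemap(df, path=["category", "subcategory"], values="amount")
        fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
        st.plotly_chart(fig, use_container_width=True)
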
spaces/DeclK/pose/model_zoo/rtmdet/rtmdet_tiny_8xb32-300e_coco/detection_onnxruntime_static.py
DELETED
@@ -1,23 +0,0 @@
-onnx_config = dict(
-    type='onnx',
-    export_params=True,
-    keep_initializers_as_inputs=False,
-    opset_version=11,
-    save_file='end2end.onnx',
-    input_names=['input'],
-    output_names=['dets', 'labels'],
-    input_shape=None,
-    optimize=True)
-codebase_config = dict(
-    type='mmdet',
-    task='ObjectDetection',
-    model_type='end2end',
-    post_processing=dict(
-        score_threshold=0.05,
-        confidence_threshold=0.005,
-        iou_threshold=0.5,
-        max_output_boxes_per_class=200,
-        pre_top_k=5000,
-        keep_top_k=100,
-        background_label_id=-1))
-backend_config = dict(type='onnxruntime')
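
The file above is an mmdeploy-style deployment config pairing an ONNX export spec with mmdet post-processing thresholds and an ONNX Runtime backend. A sketch of how such a config is typically consumed; the image, config, and checkpoint paths are placeholders, and the torch2onnx entry point is assumed from mmdeploy's documented API rather than anything in this diff:

    # Hypothetical export call; all file paths are placeholders.
    from mmdeploy.apis import torch2onnx

    torch2onnx(
        img="demo.jpg",
        work_dir="work_dir",
        save_file="end2end.onnx",  # matches save_file in onnx_config above
        deploy_cfg="detection_onnxruntime_static.py",
        model_cfg="rtmdet_tiny_8xb32-300e_coco.py",
        model_checkpoint="rtmdet_tiny.pth",
        device="cpu",
    )
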