Commit
·
05c9856
1
Parent(s):
596aa28
Update parquet files (step 34 of 476)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Blender with These Amazing Tips and Tricks to Boost Your Workflow.md +0 -24
- spaces/1gistliPinn/ChatGPT4/Examples/Agile.MOV.Video.Splitter.v1.9.2.WinAll.Incl.!FREE! Keygen-NeoX Serial Key !FREE! Keygen.md +0 -11
- spaces/1gistliPinn/ChatGPT4/Examples/Chintakayala Ravi Full Movie 107 __TOP__.md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Cool Edit Pro 2.1 Crack Full Version [Portable] - Why You Need it for Your Music Projects.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Echangiste MILF Sexe Anal Videos ((BETTER)).md +0 -5
- spaces/1line/AutoGPT/autogpt/__init__.py +0 -0
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Emergency 5 and Join the Team of First Responders.md +0 -165
- spaces/1phancelerku/anime-remove-background/CarX Drift Racing 2 Hack APK 2022 Everything You Need to Know.md +0 -108
- spaces/1phancelerku/anime-remove-background/Download Jigsaw APK and Enjoy Hundreds of Fun Puzzles.md +0 -99
- spaces/4Taps/SadTalker/src/generate_facerender_batch.py +0 -128
- spaces/801artistry/RVC801/demucs/repitch.py +0 -96
- spaces/801artistry/RVC801/get-pip.py +0 -0
- spaces/801artistry/RVC801/infer/lib/train/data_utils.py +0 -517
- spaces/AIFILMS/StyleGANEX/scripts/style_mixing.py +0 -101
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/modules.py +0 -314
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_x-v61_syncbn_fast_8xb16-300e_coco.py +0 -14
- spaces/Abubakari/Sepsis-fastapi-prediction-app/README.md +0 -10
- spaces/ActivatedOne/JorisCos-ConvTasNet_Libri1Mix_enhsingle_16k/app.py +0 -3
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Factory.d.ts +0 -6
- spaces/Ama434/neutral-barlow/README.md +0 -17
- spaces/Amrrs/yt-shorts-video-captioning/README.md +0 -13
- spaces/Androidonnxfork/CivitAi-to-Diffusers/app1.py +0 -93
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/inference/README.md +0 -8
- spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py +0 -7
- spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_40k_voc12aug.py +0 -7
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_cell_widths.py +0 -451
- spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets.py +0 -123
- spaces/Benson/text-generation/Examples/Ataque A Titan 2 Ps Vita Parche Ingls.md +0 -71
- spaces/Benson/text-generation/Examples/Betty Namaganda Yesu Anatera Okudda Download.md +0 -71
- spaces/Benson/text-generation/Examples/Caramelo Crush Saga Apkfeed.md +0 -243
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/config.py +0 -335
- spaces/Blaise-g/summarize-biomedical-papers-long-summary-or-tldr/utils.py +0 -63
- spaces/BramVanroy/llama-2-13b-chat-dutch-space/USE_POLICY.md +0 -50
- spaces/BramVanroy/text-to-amr/app.py +0 -118
- spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/main.py +0 -69
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/tests/__init__.py +0 -1
- spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubInstallRules.cmake +0 -15
- spaces/Cong723/gpt-academic-public/crazy_functions/test_project/latex/attention/model_architecture.tex +0 -155
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/fcos/loss.py +0 -194
- spaces/DEVILOVER/image_captioning/app.py +0 -3
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/http_writer.py +0 -198
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/S__i_l_l.py +0 -87
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-9001a1ae.js +0 -6
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_connection.py +0 -633
- spaces/DaFujaTyping/hf-Chat-ui/src/lib/utils/analytics.ts +0 -39
- spaces/DaleChen/AutoGPT/autogpt/memory/pinecone.py +0 -75
- spaces/Detomo/ai-comic-generation/src/components/ui/command.tsx +0 -155
- spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/bias_act.h +0 -40
- spaces/EPFL-VILAB/MultiMAE/multimae/output_adapter_utils.py +0 -290
- spaces/Edward-Ji/essentials-of-microeconomics/essentials_of_microeconomics/elasticity.py +0 -338
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Blender with These Amazing Tips and Tricks to Boost Your Workflow.md
DELETED
@@ -1,24 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Blender with Add-ons and Extensions</h1>
|
3 |
-
<p>Blender is a free and open source 3D creation suite that can be used for modeling, animation, rendering, simulation, video editing and more. Blender has a rich ecosystem of add-ons and extensions that can enhance its functionality and features. In this article, we will show you how to download Blender with some of the most popular and useful add-ons and extensions.</p>
|
4 |
-
<h2>Step 1: Download Blender</h2>
|
5 |
-
<p>The first step is to download Blender from its official website: <a href="https://www.blender.org/download/">https://www.blender.org/download/</a>. You can choose the version that suits your operating system and hardware. The latest stable version at the time of writing is 2.93.5.</p>
|
6 |
-
<h2>download blender with crack</h2><br /><p><b><b>Download</b> › <a href="https://byltly.com/2uKw6g">https://byltly.com/2uKw6g</a></b></p><br /><br />
|
7 |
-
<h2>Step 2: Install Blender</h2>
|
8 |
-
<p>The next step is to install Blender on your computer. The installation process may vary depending on your operating system, but it is usually straightforward and easy to follow. You can find detailed instructions for different platforms here: <a href="https://docs.blender.org/manual/en/latest/getting_started/installing/index.html">https://docs.blender.org/manual/en/latest/getting_started/installing/index.html</a>.</p>
|
9 |
-
<h2>Step 3: Find Add-ons and Extensions</h2>
|
10 |
-
<p>Now that you have Blender installed, you can start looking for add-ons and extensions that you want to use. There are many sources where you can find them, such as:</p>
|
11 |
-
<ul>
|
12 |
-
<li>The official Blender add-on repository: <a href="https://www.blender.org/download/addons/">https://www.blender.org/download/addons/</a>. Here you can find add-ons that are developed and maintained by the Blender Foundation or the Blender community. They are categorized by topics such as animation, modeling, rendering, etc.</li>
|
13 |
-
<li>The Blender Market: <a href="https://blendermarket.com/">https://blendermarket.com/</a>. This is a platform where you can buy or sell add-ons and other assets for Blender. You can find a wide range of products from various creators and developers.</li>
|
14 |
-
<li>The Gumroad: <a href="https://gumroad.com/discover?query=blender">https://gumroad.com/discover?query=blender</a>. This is another platform where you can buy or sell add-ons and other assets for Blender. You can also find some free or pay-what-you-want products.</li>
|
15 |
-
<li>The GitHub: <a href="https://github.com/search?q=blender+addon">https://github.com/search?q=blender+addon</a>. This is a website where you can find open source projects and code. You can search for blender addon and find many repositories that contain add-ons and extensions for Blender. Some of them may be experimental or unfinished, so use them at your own risk.</li>
|
16 |
-
</ul>
|
17 |
-
<h2>Step 4: Install Add-ons and Extensions</h2>
|
18 |
-
<p>Once you have found the add-ons and extensions that you want to use, you need to install them in Blender. The installation process may vary depending on the source and the type of the add-on or extension, but it usually involves one of these methods:</p>
|
19 |
-
<ul>
|
20 |
-
<li>Using the built-in add-on manager in Blender. This is the easiest and most recommended way to install add-ons that are available in the official repository or in a zip file. You can access the add-on manager by going to Edit > Preferences > Add-ons in Blender. Then you can click on the Install button and browse to the zip file of the add-on or extension that you downloaded. After that, you can enable the add-on or extension by checking the box next to its name.</li>
|
21 |
-
<li>Copying the files manually to the Blender folder. This is another way to install add-ons or extensions that are not in a zip file or are not compatible with the add-on manager. You need to copy the files of the add-on or extension to the appropriate folder in your Blender installation directory. For example, if you are using Windows and have installed Blender in C:\Program Files\Blender Foundation\Blender 2.93\ , then you need to copy the files to C:\Program Files\Blender Foundation\Blender 2.93\scripts\addons\ . After that, you can enable the add-on or extension by going to Edit > Preferences > Add-ons in Blender and checking the box next to its name.</li></p>
|
22 |
-
<p></p> ddb901b051<br />
|
23 |
-
<br />
|
24 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Agile.MOV.Video.Splitter.v1.9.2.WinAll.Incl.!FREE! Keygen-NeoX Serial Key !FREE! Keygen.md
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<p>A limited number of downloads are available from the Microsoft Downloads site. Agile.MOV.Video.Splitter.v1.9.2.WinAll.Incl.KeyGen-NeoX Serial Key Keygen. (Software) Asterisk Key 8.3 Build 2974 - DLLOZ.exe. Agile.MOV.Video.Splitter.v1.9.2.WinAll.Incl.KeyGen-NeoX.exe. 73728. Agile.MP4.Video.Joiner.v2.3.5. </p>
|
3 |
-
<h2>Agile.MOV.Video.Splitter.v1.9.2.WinAll.Incl.KeyGen-NeoX Serial Key Keygen</h2><br /><p><b><b>Download File</b> ✪ <a href="https://imgfil.com/2uy13O">https://imgfil.com/2uy13O</a></b></p><br /><br />
|
4 |
-
<p>A limited number of downloads are available from the Microsoft Downloads site.. Agile.MOV.Video.Splitter.v1.9.2.WinAll.Incl.KeyGen-NeoX Serial Key Keygen. (Software) Asterisk Key 8.3 Build 2974 - DLLOZ.exe. Agile.MOV.Video.Splitter.v1.9.2.WinAll.Incl.KeyGen-NeoX.exe. 73728. Agile.MP4.Video.Joiner.v2.3.5. </p>
|
5 |
-
<p>DownloadAgile.MOV.Video.Converter.v3.3.5.WinAll.Incl.KeyGen-NeoX.rar Fillon chapitre terre habité.on song download genetikos pokemon dialga vs palkia vs giratina vs arceus. de sceaux rough video songs trailer thesetpieces everton kriss. </p>
|
6 |
-
<p>,Total Installer.Total Installer.Total Installer.Total Installer. Agile.MOV.Video.Converter.v3.3.5.WinAll.Incl.KeyGen-NeoX.exe. 1504060. Agile.MP4.Video.. Werben.v3.0.1-RATINGS.exe. 502932. Agile.MOV.Video.Converter.v3.3.5.WinAll.Incl.KeyGen-NeoX. </p>
|
7 |
-
<p></p>
|
8 |
-
<p>Agile.MOV.Video.Splitter.v1.9.2.WinAll.Incl.KeyGen-NeoX. So, simply run the new setup and finish the process. Likewise, this process does not remove all the game data. Agile.MOV.Video.Splitter.v1.9.2.WinAll.Incl.KeyGen-NeoX. </p>
|
9 |
-
<p>Xtream MPEG to AVI Converter.xml. V1.0.1. Modify the file, such as.txt. Microsoft Office 2003 (SP4) Serial Number Key Reader.exe. Agile.MOV.Video.Converter.v1.9.2.WinAll.Incl.KeyGen-NeoX.exe. 3777837. MPEG.Converter.v2.2.0.WinAll.Incl.Keygen-BLiZZARD.exe. </p> 899543212b<br />
|
10 |
-
<br />
|
11 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Chintakayala Ravi Full Movie 107 __TOP__.md
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>the film picks up the pace later in the third act and one would expect more drama from a comedy. anushka needs more opportunities as an actor. the possibility of a musical interlude in the film should be explored more thoroughly. chinnarasu is a character with no significance to the story. kona venkat's dialogues are the weakest link in the movie.</p>
|
3 |
-
<h2>Chintakayala Ravi Full Movie 107</h2><br /><p><b><b>Download</b> 🗸🗸🗸 <a href="https://imgfil.com/2uy26j">https://imgfil.com/2uy26j</a></b></p><br /><br />
|
4 |
-
<p>mamatha mohandas and venkatesh have displayed superb interactions. venkatesh's acting brings life into his character, as he is that much of a software engineer in the us. anushka's performance is another reason why c ravi is the greater-than-god-great character. sometimes you get to see how perfectly well chandramohan & venu madhav negotiate their scenes. </p>
|
5 |
-
<p>c ravi's grand-mom is played by laxmi. aunty used to say that even if chintakayala ravi & c ravi are married off, as long as they are happy, she is going to be happy. what a way to live in old age, to be content with being the happy ever-after. what is great about the movie is that each character has its own section. due to lack of budget, the movie is staged differently in each zone. part i is shot in mysore while part ii, iii, & iv is shot in new york. and all these characters interact with each other very well. laxmi's role is also an interesting one that actually talks about the relationship between women and their daughters. talking about women at that age, venkatesh's performance in mother-son interactions is brilliant. </p>
|
6 |
-
<p>what is so bad: what hurts is that c ravi and sunita's love has to be sacrificed (temporarily) for the greater good of the movie. director's cut and venkatesh's laptop captured scenes in the past (enough to tell the audience about his past) would have added so much more value to the film and have been better received. also the character of mamatha mohandas is so stereotyped that it is ridiculous. its okay to play an engineer, but not an engineer who drinks and fixes drinks. the film is filled with so many coincidences that the making of the film looks like a movie script story, that it's a webisode of a movie where you can stop when you think you are getting bored. so, if you are planning to watch the movie, skip this scene and go watch the one where the couple is about to say “i love you.” the missing scenes are so witty that once you have forgotten them, you won't understand what the buzz is all about.</p>
|
7 |
-
<p></p> 899543212b<br />
|
8 |
-
<br />
|
9 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Cool Edit Pro 2.1 Crack Full Version [Portable] - Why You Need it for Your Music Projects.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Monsoon Full Movie 1080p Download</h2><br /><p><b><b>Download File</b> >>>>> <a href="https://imgfil.com/2uxZIW">https://imgfil.com/2uxZIW</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
aaccfb2cb3<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Echangiste MILF Sexe Anal Videos ((BETTER)).md
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>I met this married milf on --- xmxx freeporno xxnn pornxxx nxxn ass4all ixxx xxnxxx xnxxcom xbxx xnvideos seqsi xcxx xnxz xnxc xxnnx xxnnxx xxnc xxnv xmxxx pornoitalia pornoita freepor pornx lupoporno nxnxx nxgx nx nxxxn xncx xxnl cxnxx sxxx</p>
|
3 |
-
<h2>Echangiste MILF Sexe Anal Videos</h2><br /><p><b><b>Download</b> <a href="https://imgfil.com/2uxYqC">https://imgfil.com/2uxYqC</a></b></p><br /><br /> aaccfb2cb3<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1line/AutoGPT/autogpt/__init__.py
DELETED
File without changes
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Emergency 5 and Join the Team of First Responders.md
DELETED
@@ -1,165 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Emergency 5 and Become a Rescue Commander</h1>
|
3 |
-
<p>Do you love strategy games that test your skills and decision-making abilities? Do you want to experience what it's like to be in charge of a team of first responders in various emergency situations? If so, then you should try Emergency 5, a realistic and challenging strategy simulation game that puts you in the role of a rescue commander. In this article, we will show you how to download Emergency 5 and how to play it effectively. We will also give you some tips on how to enhance your Emergency 5 experience with mods, friends, and challenges. By the end of this article, you will be ready to take on any emergency scenario and save lives.</p>
|
4 |
-
<h2>What is Emergency 5?</h2>
|
5 |
-
<p>Emergency 5 is the fifth installment of the Emergency series, a popular franchise of strategy simulation games that focus on managing emergency services in various scenarios. The game was released in November 2014 by Sixteen Tons Entertainment, a German developer that specializes in simulation games. Emergency 5 features three full campaigns with 20 missions each, set in Berlin, Hamburg, and Munich. The game also offers freeplay mode, where you can choose from over 20 maps with different settings and challenges. Additionally, the game has multiplayer mode, where you can cooperate or compete with other players online or locally.</p>
|
6 |
-
<h2>download emergency 5</h2><br /><p><b><b>DOWNLOAD</b> ✏ ✏ ✏ <a href="https://urlin.us/2uSYWc">https://urlin.us/2uSYWc</a></b></p><br /><br />
|
7 |
-
<p>The gameplay of Emergency 5 is similar to previous games in the series, but with improved graphics and mechanics. You have to coordinate your team of units, such as firefighters, police officers, paramedics, engineers, and more, in order to deal with various emergencies, such as fires, accidents, crimes, disasters, and terrorist attacks. You have to deploy your units strategically, control them individually or as a group, and use their abilities and equipment wisely. You also have to manage your resources, such as vehicles, water supply, ammunition, and medical supplies. You have to complete your objectives within a time limit or before the situation gets out of control. The game is realistic and challenging, requiring you to think fast and act smart.</p>
|
8 |
-
<h2>Why You Should Play Emergency 5?</h2>
|
9 |
-
<p>There are many reasons why you should play Emergency 5 if you are a fan of strategy games or emergency services. Here are some of them:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Emergency 5 is a realistic and immersive game that lets you experience what it's like to be a rescue commander in various scenarios. You have to deal with realistic situations that require quick thinking and problem-solving skills. You also have to face the consequences of your actions and decisions.</li>
|
12 |
-
<li>Emergency 5 is a challenging and rewarding game that tests your skills and abilities as a strategist and a leader. You have to manage your units effectively, use their strengths and weaknesses wisely, and adapt to changing circumstances. You also have to balance your resources and priorities <p>and goals. You also have to overcome the challenges and difficulties that the game throws at you.</li>
|
13 |
-
<li>Emergency 5 is a fun and entertaining game that offers a lot of variety and replay value. You can choose from different campaigns, missions, maps, modes, and difficulty levels. You can also customize your game with mods, play with friends, or create your own scenarios. You can enjoy the game in different ways and never get bored.</li>
|
14 |
-
</ul>
|
15 |
-
<p>If you are looking for a game that combines realism, challenge, and fun, then Emergency 5 is the game for you.</p>
|
16 |
-
<h2>How to Download Emergency 5?</h2>
|
17 |
-
<p>Now that you know what Emergency 5 is and why you should play it, you might be wondering how to download it. There are two main ways to download Emergency 5: from Steam or from other sources. We will explain both methods in detail below.</p>
|
18 |
-
<h3>Downloading from Steam</h3>
|
19 |
-
<p>Steam is a popular online platform that allows you to buy, download, and play games on your computer. Steam also offers other features, such as cloud saving, achievements, community, and more. If you want to download Emergency 5 from Steam, you need to follow these steps:</p>
|
20 |
-
<ol>
|
21 |
-
<li>Create a Steam account if you don't have one already. You can do this by visiting <a href="">https://store.steampowered.com/join/</a> and following the instructions.</li>
|
22 |
-
<li>Download and install the Steam client on your computer. You can do this by visiting <a href="">https://store.steampowered.com/about/</a> and clicking on the "Install Steam" button.</li>
|
23 |
-
<li>Launch the Steam client and log in with your Steam account.</li>
|
24 |
-
<li>Search for Emergency 5 in the Steam store or visit <a href="">https://store.steampowered.com/app/328240/Emergency_5__Deluxe_Edition/</a>.</li>
|
25 |
-
<li>Add the game to your cart and proceed to checkout. You can pay with various methods, such as credit card, PayPal, or Steam wallet.</li>
|
26 |
-
<li>Once the payment is confirmed, the game will be added to your library. You can then download and install the game by clicking on the "Install" button.</li>
|
27 |
-
<li>After the installation is complete, you can launch the game by clicking on the "Play" button.</li>
|
28 |
-
</ol>
|
29 |
-
<p>Congratulations! You have successfully downloaded Emergency 5 from Steam. Enjoy the game!</p>
|
30 |
-
<h3>Downloading from Other Sources</h3>
|
31 |
-
<p>If you don't want to use Steam or you prefer other sources, you can also download Emergency 5 from other websites or platforms. However, you need to be careful and make sure that the source is reliable and trustworthy. Some of the possible sources are:</p>
|
32 |
-
<p>download emergency 5 deluxe edition<br />
|
33 |
-
download emergency 5 free full version<br />
|
34 |
-
download emergency 5 mod installer<br />
|
35 |
-
download emergency 5 los angeles mod<br />
|
36 |
-
download emergency 5 steam<br />
|
37 |
-
download emergency 5 crack<br />
|
38 |
-
download emergency 5 update<br />
|
39 |
-
download emergency 5 game for pc<br />
|
40 |
-
download emergency 5 mac<br />
|
41 |
-
download emergency 5 windows 10<br />
|
42 |
-
download emergency 5 demo<br />
|
43 |
-
download emergency 5 trainer<br />
|
44 |
-
download emergency 5 cheats<br />
|
45 |
-
download emergency 5 patch<br />
|
46 |
-
download emergency 5 editor<br />
|
47 |
-
download emergency 5 torrent<br />
|
48 |
-
download emergency 5 iso<br />
|
49 |
-
download emergency 5 keygen<br />
|
50 |
-
download emergency 5 serial number<br />
|
51 |
-
download emergency 5 activation code<br />
|
52 |
-
download emergency 5 gameplay<br />
|
53 |
-
download emergency 5 missions<br />
|
54 |
-
download emergency 5 scenarios<br />
|
55 |
-
download emergency 5 vehicles<br />
|
56 |
-
download emergency 5 maps<br />
|
57 |
-
download emergency 5 mods berlin<br />
|
58 |
-
download emergency 5 mods hamburg<br />
|
59 |
-
download emergency 5 mods munich<br />
|
60 |
-
download emergency 5 mods cologne<br />
|
61 |
-
download emergency 5 mods london<br />
|
62 |
-
download emergency 5 mods paris<br />
|
63 |
-
download emergency 5 mods new york<br />
|
64 |
-
download emergency 5 mods san francisco<br />
|
65 |
-
download emergency 5 mods tokyo<br />
|
66 |
-
download emergency 5 mods sydney<br />
|
67 |
-
download emergency 5 mods dubai<br />
|
68 |
-
download emergency 5 mods rio de janeiro<br />
|
69 |
-
download emergency 5 mods moscow<br />
|
70 |
-
download emergency 5 mods beijing<br />
|
71 |
-
download emergency 5 mods seoul<br />
|
72 |
-
download emergency 5 mods singapore<br />
|
73 |
-
download emergency 5 mods amsterdam<br />
|
74 |
-
download emergency 5 mods stockholm<br />
|
75 |
-
download emergency 5 mods copenhagen<br />
|
76 |
-
download emergency 5 mods oslo<br />
|
77 |
-
download emergency 5 mods helsinki<br />
|
78 |
-
download emergency 5 mods warsaw<br />
|
79 |
-
download emergency 5 mods prague<br />
|
80 |
-
download emergency 5 mods vienna</p>
|
81 |
-
<ul>
|
82 |
-
<li>The official website of Sixteen Tons Entertainment: <a href="">https://www.sixteen-tons.de/en/</a>. Here you can find information about the game and the developer, as well as links to buy or download the game from various platforms.</li>
|
83 |
-
<li>The official website of Deep Silver: <a href="">https://www.deepsilver.com/en/games/emergency-5/</a>. Here you can find information about the game and the publisher, as well as links to buy or download the game from various platforms.</li>
|
84 |
-
<li>The official website of Gamesplanet: <a href="">https://us.gamesplanet.com/game/emergency-5-deluxe-edition-steam-key--2639-1</a>. Here you can buy or download the game directly from Gamesplanet, a trusted online retailer of digital games.</li>
|
85 |
-
<li>The official website of GOG: <a href="">https://www.gog.com/game/emergency_5_deluxe_edition</a>. Here you can buy or download the game directly from GOG, a trusted online platform that offers DRM-free games.</li>
|
86 |
-
</ul>
|
87 |
-
<p>No matter which source you choose, you need to follow these steps:</p>
|
88 |
-
<ol>
|
89 |
-
<li>Visit the website of your chosen source and find Emergency 5.</li>
|
90 |
-
<li>Add the game to your cart and proceed to checkout. You can pay with various methods, depending on the source.</li>
|
91 |
-
<li>Once the payment is confirmed, you will receive a confirmation email with a link or a code to download or activate the game.</li>
|
92 |
-
<li>Follow the instructions in the email to download or activate the game on your computer.</li>
|
93 |
-
<li>After the download or activation is complete, you can launch the game by clicking on its icon or shortcut.</li>
|
94 |
-
</ol>
|
95 |
-
<p>Congratulations! You have successfully downloaded Emergency 5 from another source. Enjoy the game!</p>
|
96 |
-
<h2>How to Play Emergency 5?</h2>
|
97 |
-
<p>Now that you have downloaded Emergency 5, you might be wondering how to play it. Don't worry, we will guide you through the basics of playing the game and give you some tips and tricks to help you become a better rescue commander. Here are the main aspects of playing Emergency 5:</p>
|
98 |
-
<h3>Choosing a Mode</h3>
|
99 |
-
<p>The first thing you need to do when you launch the game is to choose a mode. There are three modes available in Emergency 5: campaign, freeplay, and multiplayer. Each mode has its own features and objectives. Here is a brief description of each mode:</p>
|
100 |
-
<ul>
|
101 |
-
<li>Campaign: This is the main mode of the game, where you have to complete 20 missions in each of the three cities: Berlin, Hamburg, and Munich. Each mission has a different scenario and a different goal. You have to follow the instructions and complete the objectives within a time limit or before the situation escalates. You can choose the difficulty level of each mission, from easy to hard. You can also unlock achievements and medals for completing the missions.</li>
|
102 |
-
<li>Freeplay: This is the mode where you can play without any time limit or specific goal. You can choose from over 20 maps with different settings and challenges. You can also customize the parameters of the game, such as the number and type of emergencies, the weather, the traffic, and more. You can play as long as you want and see how well you can handle the emergencies.</li>
|
103 |
-
<li>Multiplayer: This is the mode where you can play with other players online or locally. You can cooperate or compete with other players in various scenarios. You can also chat with other players and share your strategies and experiences. You can join or create a server and invite your friends or join a random server and meet new people.</li>
|
104 |
-
</ul>
|
105 |
-
<p>You can choose any mode you want and switch between them anytime. You can also save your progress and resume it later.</p>
|
106 |
-
<h3>Managing Your Units</h3>
|
107 |
-
<p>The next thing you need to do when you play Emergency 5 is to manage your units. Your units are your most valuable assets in the game, as they are the ones who will perform the tasks and deal with the emergencies. You have to deploy, control, and coordinate your units effectively in order to succeed in the game. Here are some tips on how to manage your units:</p>
|
108 |
-
<ul>
|
109 |
-
<li>Deploy your units strategically: You have to decide which units to send to which location, depending on the situation and the objective. You have to consider the type, number, and distance of the emergencies, as well as the abilities and equipment of your units. You also have to consider the traffic and the terrain, as they can affect the speed and mobility of your units.</li>
|
110 |
-
<li>Control your units individually or as a group: You have to control your units manually or automatically, depending on your preference and convenience. You can select one unit or multiple units at once and give them commands, such as move, stop, attack, defend, use ability, use equipment, etc. You can also assign hotkeys to your units for easier access.</li>
|
111 |
-
<li>Coordinate your units efficiently: You have to coordinate your units with each other and with other services, such as fire department, police department, ambulance service, etc. You have to communicate with them and share information and resources. You also have to avoid conflicts and collisions between your units and other services.</li>
|
112 |
-
</ul>
|
113 |
-
<p>You have to manage your units carefully and wisely, as they are limited in number and resources. You also have to protect them from harm and danger, as they can get injured or killed in the game.</p>
|
114 |
-
<h3>Completing Missions</h3>
|
115 |
-
<p>The last thing you need to do when you play Emergency 5 is to complete missions. Missions are the main objectives of the game, where you have to deal with various emergencies and achieve certain goals. Missions vary in difficulty, complexity, and length. Here are some tips on how to complete missions:</p>
|
116 |
-
<ul>
|
117 |
-
<li>Follow the instructions: You have to follow the instructions given by the narrator or by other characters in the game. They will tell you what to do, where to go, what to expect, etc. They will also give you hints and tips on how to deal with certain situations.</li>
|
118 |
-
<li>Complete the objectives: You have to complete the objectives given by the game. They will show up on your screen or on your map. They will tell you what you need to do in order to complete the mission or advance in the game. They can be simple or complex, such as extinguishing a fire, rescuing a person, arresting a criminal, etc.</li>
|
119 |
-
<li>Deal with the emergencies: You have to deal with the emergencies that occur in the game. They can be random or scripted, depending on the mission and the mode. They can be minor or major, such as a car crash, a robbery, a flood, a bomb threat, etc. You have to use your units and resources to handle the emergencies and prevent them from getting worse.</li>
|
120 |
-
</ul>
|
121 |
-
<p>You have to complete the missions within a time limit or before the situation gets out of control. You also have to avoid failing the mission by losing all your units, running out of resources, or causing too much damage or casualties.</p>
|
122 |
-
<h2>How to Enhance Your Emergency 5 Experience?</h2>
|
123 |
-
<p>If you want to improve your gameplay and enjoy the game more, there are some ways to enhance your Emergency 5 experience. Here are some of them:</p>
|
124 |
-
<h3>Playing with Mods</h3>
|
125 |
-
<p>Mods are modifications or additions to the game that change or improve some aspects of it. Mods can add new features, content, graphics, sounds, etc. to the game. Mods can also fix bugs, errors, or issues in the game. Mods are created by fans or developers of the game and are usually available for free. If you want to play with mods, you need to follow these steps:</p>
|
126 |
-
<ol>
|
127 |
-
<li>Find mods that you like and that are compatible with your version of the game. You can find mods on various websites or platforms, such as <a href="">https://www.emergency-forum.de/filebase/index.php?filebase/</a>, <a href="">https://www.nexusmods.com/emergency5</a>, or <a href="">https://steamcommunity.com/app/328240/workshop/</a>.</li>
|
128 |
-
<li>Download and install the mods on your computer. You can do this by following the instructions given by the mod creators or by using a mod manager, such as <a href="">https://www.nexusmods.com/about/vortex/</a>.</li>
|
129 |
-
<li>Launch the game and enable the mods in the settings or in the mod manager.</li>
|
130 |
-
<li>Enjoy the game with the mods.</li>
|
131 |
-
</ol>
|
132 |
-
<p>Playing with mods can make your Emergency 5 experience more fun and interesting. However, you need to be careful and make sure that the mods are safe and reliable. You also need to backup your game files before installing any mods, in case something goes wrong.</p>
|
133 |
-
<h3>Playing with Friends</h3>
|
134 |
-
<p>Playing with friends can make your Emergency 5 experience more social and cooperative. You can play with friends online or locally in multiplayer mode. You can cooperate or compete with your friends in various scenarios and see who is the better rescue commander. If you want to play with friends, you need to follow these steps:</p>
|
135 |
-
<ol>
|
136 |
-
<li>Launch the game and select multiplayer mode.</li>
|
137 |
-
<li>Choose whether you want to play online or locally.</li>
|
138 |
-
<li>If you want to play online, you need to have an internet connection and a Steam account. You can then join or create a server and invite your friends or join a random server and meet new people.</li>
|
139 |
-
<li>If you want to play locally, you need to have a LAN connection and multiple computers with the game installed. You can then join or create a server and invite your friends or join a random server and meet new people.</li>
|
140 |
-
<li>Select a map and a mode and start playing.</li>
|
141 |
-
<li>Enjoy the game with your friends.</li>
|
142 |
-
</ol>
|
143 |
-
<p>Playing with friends can make your Emergency 5 experience more fun and challenging. However, you need to be respectful and friendly with other players and follow the rules and etiquette of multiplayer gaming.</p>
|
144 |
-
<h3>Playing with Challenges</h3>
|
145 |
-
<p>Playing with challenges can make your Emergency 5 experience more difficult and complex. You can play with challenges by increasing the difficulty level of the game, by customizing the parameters of the game, or by creating your own scenarios. Here are some examples of challenges:</p>
|
146 |
-
<ul>
|
147 |
-
<li>Increase the difficulty level of the game by choosing hard mode or by adjusting the settings in the options menu. This will make the emergencies more frequent, severe, and unpredictable. It will also make your units more vulnerable, less effective, and more expensive.</li>
|
148 |
-
<li>Customize the parameters of the game by changing the values in the freeplay mode or by using a mod that allows you to do so. You can change the number and type of emergencies, the weather, the traffic, the population, and more. You can make the game more realistic, chaotic, or fun, depending on your preference.</li>
|
149 |
-
<li>Create your own scenarios by using the editor mode or by using a mod that allows you to do so. You can create your own maps, emergencies, objectives, and rules. You can also share your scenarios with other players or download scenarios created by other players.</li>
|
150 |
-
</ul>
|
151 |
-
<p>Playing with challenges can make your Emergency 5 experience more exciting and satisfying. However, you need to be prepared and skilled to face the challenges and overcome them.</p>
|
152 |
-
<h1>Conclusion</h1>
|
153 |
-
<p>Emergency 5 is a realistic and challenging strategy simulation game that puts you in the role of a rescue commander. You have to manage your units and deal with various emergencies in different scenarios. You can also enhance your Emergency 5 experience with mods, friends, and challenges. Emergency 5 is a game that will test your skills and decision-making abilities, as well as entertain and immerse you. If you are looking for a game that combines realism, challenge, and fun, then Emergency 5 is the game for you.</p>
|
154 |
-
<p>So what are you waiting for? Download Emergency 5 today and become a rescue commander!</p>
|
155 |
-
<h2>FAQs</h2>
|
156 |
-
<p>Here are some frequently asked questions about Emergency 5:</p>
|
157 |
-
<ul>
|
158 |
-
<li>Q: What are the system requirements for Emergency 5?<br>A: The minimum system requirements for Emergency 5 are: Windows Vista/7/8/10, Intel Quad Core 2.6 GHz or AMD Quad Core 3.2 GHz processor, 6 GB RAM, NVIDIA GeForce 550 Ti or AMD Radeon HD 6670 graphics card, DirectX 9.0c compatible sound card, 18 GB available hard disk space.</li>
|
159 |
-
<li>Q: How can I update Emergency 5?<br>A: You can update Emergency 5 by launching the game and following the instructions on the screen or by using Steam or other platforms that offer automatic updates.</li>
|
160 |
-
<li>Q: How can I get help or support for Emergency 5?<br>A: You can get help or support for Emergency 5 by visiting the official website of Sixteen Tons Entertainment or Deep Silver, by contacting them via email or phone, or by visiting the forums or communities of Emergency 5 on Steam or other platforms.</li>
|
161 |
-
<li>Q: How can I give feedback or suggestions for Emergency 5?<br>A: You can give feedback or suggestions for Emergency 5 by visiting the official website of Sixteen Tons Entertainment or Deep Silver, by contacting them via email or phone, or by visiting the forums or communities of Emergency 5 on Steam or other platforms.</li>
|
162 |
-
<li>Q: How can I learn more about Emergency 5?<br>A: You can learn more about Emergency 5 by visiting the official website of Sixteen Tons Entertainment or Deep Silver, by watching videos or trailers of the game on YouTube or other platforms, or by reading reviews or articles about the game on various websites or blogs.</li>
|
163 |
-
</ul></p> 197e85843d<br />
|
164 |
-
<br />
|
165 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/CarX Drift Racing 2 Hack APK 2022 Everything You Need to Know.md
DELETED
@@ -1,108 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>CarX Drift Racing 2 Hack APK 2022: How to Download and Install It</h1>
|
3 |
-
<p>If you are a fan of racing games, especially drifting games, you might have heard of CarX Drift Racing 2. It is one of the most popular and realistic drifting games on Android devices. However, if you want to enjoy the game to the fullest, you might need some extra money and features to unlock all the cars and tracks. That's why you need CarX Drift Racing 2 Hack APK 2022, a modified version of the game that gives you unlimited money, gold, and other features. In this article, we will show you how to download and install CarX Drift Racing 2 Hack APK 2022 on your device.</p>
|
4 |
-
<h2>What is CarX Drift Racing 2?</h2>
|
5 |
-
<p>CarX Drift Racing 2 is a sequel to the popular CarX Drift Racing game that was released in 2014. It is a racing game that focuses on drifting, a driving technique where the driver intentionally oversteers the car to make it slide sideways. The game features realistic physics, stunning graphics, and a variety of cars and tracks to choose from. You can customize your car with different parts, paint, and stickers, and compete with other players online or offline. You can also join a club or create your own club to challenge other clubs and win prizes.</p>
|
6 |
-
<h2>carx drift racing 2 hack apk 2022</h2><br /><p><b><b>Download Zip</b> »»» <a href="https://jinyurl.com/2uNQeU">https://jinyurl.com/2uNQeU</a></b></p><br /><br />
|
7 |
-
<h3>Features of CarX Drift Racing 2</h3>
|
8 |
-
<p>Some of the features of CarX Drift Racing 2 are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>A new game mode called XDS, where you can practice tandem drifting with an AI driver.</li>
|
11 |
-
<li>A career mode with over 100 missions and events.</li>
|
12 |
-
<li>A tuning mode where you can adjust your car's suspension, engine, brakes, tires, and more.</li>
|
13 |
-
<li>A garage mode where you can change your car's appearance with over 1000 parts and accessories.</li>
|
14 |
-
<li>A ghost mode where you can race against your own best time or other players' ghosts.</li>
|
15 |
-
<li>A live stream mode where you can broadcast your gameplay to other players.</li>
|
16 |
-
<li>A photo mode where you can take screenshots of your car and share them with others.</li>
|
17 |
-
</ul>
|
18 |
-
<h3>Why do you need CarX Drift Racing 2 Hack APK?</h3>
|
19 |
-
<p>CarX Drift Racing 2 is a free-to-play game, but it also has some in-app purchases that require real money. For example, you need money and gold to buy new cars, upgrade your car's performance, or unlock new tracks. However, earning money and gold in the game can be quite slow and tedious. You might have to play for hours or even days to get enough money and gold to buy what you want. That's why some players prefer to use CarX Drift Racing 2 Hack APK, a modified version of the game that gives them unlimited money and gold without spending any real money.</p>
|
20 |
-
<p>carx drift racing 2 mod apk unlimited money and gold<br />
|
21 |
-
carx drift racing 2 hack apk download for android<br />
|
22 |
-
carx drift racing 2 mod apk latest version 2022<br />
|
23 |
-
carx drift racing 2 hack apk no root<br />
|
24 |
-
carx drift racing 2 mod apk free shopping<br />
|
25 |
-
carx drift racing 2 hack apk online<br />
|
26 |
-
carx drift racing 2 mod apk obb<br />
|
27 |
-
carx drift racing 2 hack apk ios<br />
|
28 |
-
carx drift racing 2 mod apk revdl<br />
|
29 |
-
carx drift racing 2 hack apk without human verification<br />
|
30 |
-
carx drift racing 2 mod apk rexdl<br />
|
31 |
-
carx drift racing 2 hack apk unlimited coins<br />
|
32 |
-
carx drift racing 2 mod apk happymod<br />
|
33 |
-
carx drift racing 2 hack apk android 1<br />
|
34 |
-
carx drift racing 2 mod apk all cars unlocked<br />
|
35 |
-
carx drift racing 2 hack apk pure<br />
|
36 |
-
carx drift racing 2 mod apk data<br />
|
37 |
-
carx drift racing 2 hack apk offline<br />
|
38 |
-
carx drift racing 2 mod apk android republic<br />
|
39 |
-
carx drift racing 2 hack apk mediafıre<br />
|
40 |
-
carx drift racing 2 mod apk an1<br />
|
41 |
-
carx drift racing 2 hack apk indir<br />
|
42 |
-
carx drift racing 2 mod apk vip unlocked<br />
|
43 |
-
carx drift racing 2 hack apk uptodown<br />
|
44 |
-
carx drift racing 2 mod apk unlimited everything<br />
|
45 |
-
carx drift racing 2 hack apk mega<br />
|
46 |
-
carx drift racing 2 mod apk new update<br />
|
47 |
-
carx drift racing 2 hack apk original<br />
|
48 |
-
carx drift racing 2 mod apk platinmods<br />
|
49 |
-
carx drift racing 2 hack apk pc</p>
|
50 |
-
<h2>How to download and install CarX Drift Racing 2 Hack APK 2022?</h2>
|
51 |
-
<p>If you want to download and install CarX Drift Racing 2 Hack APK 2022 on your device, you need to follow these steps:</p>
|
52 |
-
<h3>Step 1: Enable unknown sources on your device</h <p>Step 1: Enable unknown sources on your device</p>
|
53 |
-
<p>Before you can install CarX Drift Racing 2 Hack APK 2022 on your device, you need to enable unknown sources on your device. This will allow you to install apps that are not from the official Google Play Store. To do this, you need to go to your device's settings, then security, then unknown sources, and toggle it on. You might see a warning message that says installing apps from unknown sources can harm your device, but don't worry, as long as you download the CarX Drift Racing 2 Hack APK file from a trusted source, you should be fine.</p>
|
54 |
-
<h3>Step 2: Download the CarX Drift Racing 2 Hack APK file from a trusted source</h3>
|
55 |
-
<p>Next, you need to download the CarX Drift Racing 2 Hack APK file from a trusted source. There are many websites that claim to offer the CarX Drift Racing 2 Hack APK file, but some of them might be fake or contain viruses or malware. Therefore, you need to be careful and only download the CarX Drift Racing 2 Hack APK file from a reputable website. One of the websites that we recommend is [CarXDriftRacing2HackAPK.com], where you can find the latest version of the CarX Drift Racing 2 Hack APK file for free. To download the CarX Drift Racing 2 Hack APK file from this website, you need to follow these steps:</p>
|
56 |
-
<ol>
|
57 |
-
<li>Go to [CarXDriftRacing2HackAPK.com] on your device's browser.</li>
|
58 |
-
<li>Scroll down and find the download button that says "Download CarX Drift Racing 2 Hack APK 2022".</li>
|
59 |
-
<li>Tap on the download button and wait for the download to start.</li>
|
60 |
-
<li>You might see a pop-up window that asks you to verify that you are not a robot. If so, just follow the instructions and complete the verification process.</li>
|
61 |
-
<li>Once the download is complete, you will see a notification that says "CarX Drift Racing 2 Hack APK 2022 downloaded".</li>
|
62 |
-
</ol>
|
63 |
-
<h3>Step 3: Locate and install the CarX Drift Racing 2 Hack APK file on your device</h3>
|
64 |
-
<p>After you have downloaded the CarX Drift Racing 2 Hack APK file on your device, you need to locate and install it on your device. To do this, you need to follow these steps:</p>
|
65 |
-
<ol>
|
66 |
-
<li>Go to your device's file manager and find the folder where you downloaded the CarX Drift Racing 2 Hack APK file. It might be in your downloads folder or in your external storage.</li>
|
67 |
-
<li>Tap on the CarX Drift Racing 2 Hack APK file and select "Install".</li>
|
68 |
-
<li>You might see a pop-up window that asks you to confirm the installation. If so, just tap on "Install" again.</li>
|
69 |
-
<li>Wait for the installation to finish. You might see a progress bar that shows how much time is left for the installation.</li>
|
70 |
-
<li>Once the installation is done, you will see a message that says "App installed".</li>
|
71 |
-
</ol>
|
72 |
-
<h3>Step 4: Launch the game and enjoy the unlimited money and features</h3>
|
73 |
-
<p>Congratulations! You have successfully installed CarX Drift Racing 2 Hack APK 2022 on your device. Now, you can launch the game and enjoy the unlimited money and features that it offers. To launch the game, you need to follow these steps:</p>
|
74 |
-
<ol>
|
75 |
-
<li>Go to your device's app drawer and find the icon of CarX Drift Racing 2.</li>
|
76 |
-
<li>Tap on the icon and wait for the game to load.</li>
|
77 |
-
<li>You might see a pop-up window that asks you to allow some permissions for the game. If so, just tap on "Allow" or "OK".</li>
|
78 |
-
<li>You will see a splash screen that shows the logo of CarX Drift Racing 2 and some loading animations.</li>
|
79 |
-
<li>After a few seconds, you will see the main menu of the game, where you can choose your game mode, car, track, settings, and more.</li>
|
80 |
-
</ol>
|
81 |
-
<h2>What are the benefits of using CarX Drift Racing 2 Hack APK 2022?</h2>
|
82 |
-
<p>By using CarX Drift Racing 2 Hack APK 2022, you can enjoy many benefits that will make your gaming experience more fun and exciting. Some of these benefits are:</p>
|
83 |
-
<h3>Unlimited money and gold</h3>
|
84 |
-
<p>The most obvious benefit of using CarX Drift Racing 2 Hack APK 2022 is that you will have unlimited money and gold <p>The most obvious benefit of using CarX Drift Racing 2 Hack APK 2022 is that you will have unlimited money and gold in the game. Money and gold are the main currencies in the game that you can use to buy new cars, upgrade your car's performance, or unlock new tracks. With unlimited money and gold, you can buy any car you want, upgrade it to the max, or unlock all the tracks in the game. You don't have to worry about running out of money or gold, or grinding for hours or days to earn enough money or gold to buy what you want. You can enjoy the game without any limitations or restrictions.</p>
|
85 |
-
<h3>Unlocked cars and tracks</h3>
|
86 |
-
<p>Another benefit of using CarX Drift Racing 2 Hack APK 2022 is that you will have all the cars and tracks unlocked in the game. The game has over 80 cars and over 30 tracks to choose from, but not all of them are available at the beginning. You have to complete certain missions, events, or achievements to unlock some of the cars and tracks in the game. However, with CarX Drift Racing 2 Hack APK 2022, you don't have to do any of that. You can access all the cars and tracks in the game from the start. You can try out different cars and tracks and find your favorite ones.</p>
|
87 |
-
<h3>No ads and no root required</h3>
|
88 |
-
<p>A third benefit of using CarX Drift Racing 2 Hack APK 2022 is that you will not see any ads in the game and you will not need to root your device. The game has some ads that might pop up occasionally and interrupt your gameplay. These ads can be annoying and distracting, especially when you are in the middle of a race or a drift. However, with CarX Drift Racing 2 Hack APK 2022, you will not see any ads in the game. You can play the game without any interruptions or distractions. Moreover, you will not need to root your device to install CarX Drift Racing 2 Hack APK 2022. Rooting your device can be risky and complicated, as it might void your warranty, expose your device to malware, or cause some issues with your device's performance. However, with CarX Drift Racing 2 Hack APK 2022, you don't have to root your device at all. You can install it easily and safely on your device.</p>
|
89 |
-
<h3>Enhanced graphics and performance</h3>
|
90 |
-
<p>A fourth benefit of using CarX Drift Racing 2 Hack APK 2022 is that you will enjoy enhanced graphics and performance in the game. The game already has amazing graphics and physics that make it look realistic and immersive. However, with CarX Drift Racing 2 Hack APK 2022, you will experience even better graphics and performance in the game. You will see more details, colors, shadows, and effects in the game. You will also experience smoother gameplay, faster loading times, and less lagging or crashing in the game. You will have a more enjoyable and satisfying gaming experience with CarX Drift Racing 2 Hack APK 2022.</p>
|
91 |
-
<h2>Conclusion</h2>
|
92 |
-
<p>CarX Drift Racing 2 is a fantastic racing game that lets you experience the thrill and excitement of drifting. However, if you want to have more fun and freedom in the game, you might want to try CarX Drift Racing 2 Hack APK 2022, a modified version of the game that gives you unlimited money, gold, and other features. With CarX Drift Racing 2 Hack APK 2022, you can buy any car you want, upgrade it to the max, unlock all the tracks in the game, play without any ads or root requirement, and enjoy enhanced graphics and performance in the game. To download and install CarX Drift Racing 2 Hack APK 2022 on your device, just follow the steps that we have explained above. We hope that this article has helped you learn more about CarX Drift Racing 2 Hack APK 2022 and how to download and install it on your device.</p>
|
93 |
-
<h3>FAQs</h3>
|
94 |
-
<p>Here are some frequently asked questions about CarX Drift Racing 2 Hack APK 2022:</p>
|
95 |
-
<ul>
|
96 |
-
<li>Q: Is CarX Drift Racing 2 Hack APK 2022 safe to use?</li>
|
97 |
-
<li>A: Yes, CarX Drift Racing 2 Hack APK 2022 is safe to use as long as you download it from a trusted source like [CarXDriftRacing2HackAPK.com]. However, we still recommend that you scan the file with an antivirus software before installing it on your device.</li>
|
98 |
-
<li>Q: Is CarX Drift Racing 2 Hack APK 2022 compatible with my device?</li>
|
99 |
-
<li>A: CarX Drift Racing <li>A: CarX Drift Racing 2 Hack APK 2022 is compatible with most Android devices that have Android 4.1 or higher. However, some devices might have some issues with the game, such as crashing, freezing, or lagging. If you encounter any problems with the game, you can try to clear the cache, restart your device, or reinstall the game.</li>
|
100 |
-
<li>Q: Will I get banned for using CarX Drift Racing 2 Hack APK 2022?</li>
|
101 |
-
<li>A: There is a low chance that you will get banned for using CarX Drift Racing 2 Hack APK 2022, as the game does not have a strict anti-cheat system. However, we still advise that you use the hack APK at your own risk and discretion. You can also avoid using the hack APK in online mode or in club battles, as that might raise suspicion from other players or the game developers.</li>
|
102 |
-
<li>Q: How can I update CarX Drift Racing 2 Hack APK 2022?</li>
|
103 |
-
<li>A: To update CarX Drift Racing 2 Hack APK 2022, you need to download the latest version of the hack APK file from [CarXDriftRacing2HackAPK.com] and install it on your device. You don't need to uninstall the previous version of the hack APK, as the new version will overwrite it automatically.</li>
|
104 |
-
<li>Q: Can I use CarX Drift Racing 2 Hack APK 2022 with other mods or cheats?</li>
|
105 |
-
<li>A: We do not recommend that you use CarX Drift Racing 2 Hack APK 2022 with other mods or cheats, as that might cause some conflicts or errors in the game. You should only use CarX Drift Racing 2 Hack APK 2022 by itself, as it already provides you with enough features and benefits.</li>
|
106 |
-
</ul></p> 197e85843d<br />
|
107 |
-
<br />
|
108 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Jigsaw APK and Enjoy Hundreds of Fun Puzzles.md
DELETED
@@ -1,99 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Jigsaw APK Download: How to Enjoy Jigsaw Puzzles on Your Android Device</h1>
|
3 |
-
<p>If you love jigsaw puzzles, you might be wondering how you can enjoy them on your Android device. After all, jigsaw puzzles are a great way to relax, have fun, and challenge your brain. Fortunately, there is an app that lets you do just that. It's called Jigsaw APK, and it's one of the best jigsaw puzzle apps available for Android users. In this article, we'll tell you what Jigsaw APK is, how to download and install it on your device, why you should try jigsaw puzzles, and how to solve them like an expert. Let's get started!</p>
|
4 |
-
<h2>jigsaw apk download</h2><br /><p><b><b>Download</b> ☆☆☆☆☆ <a href="https://jinyurl.com/2uNN6m">https://jinyurl.com/2uNN6m</a></b></p><br /><br />
|
5 |
-
<h2>What is Jigsaw APK?</h2>
|
6 |
-
<h3>A brief introduction to the app and its features</h3>
|
7 |
-
<p>Jigsaw APK is an app that allows you to play thousands of high-quality jigsaw puzzles on your Android device. You can choose from a variety of categories, such as animals, nature, art, food, and more. You can also adjust the difficulty level by changing the number of pieces, from 9 to 400. You can even create your own custom puzzles from your photos or images. Jigsaw APK has many features that make it a great app for puzzle lovers, such as:</p>
|
8 |
-
<ul>
|
9 |
-
<li>Easy and intuitive controls. You can drag and drop pieces, zoom in and out, rotate pieces, use hints, and preview the image.</li>
|
10 |
-
<li>Auto-save function. You can pause and resume your puzzle anytime without losing your progress.</li>
|
11 |
-
<li>Daily puzzles. You can get a new free puzzle every day and collect rewards.</li>
|
12 |
-
<li>Online mode. You can play online with other players, chat with them, and compete for the best score.</li>
|
13 |
-
<li>Offline mode. You can play offline without an internet connection.</li>
|
14 |
-
</ul>
|
15 |
-
<h3>How to download and install Jigsaw APK on your Android device</h3>
|
16 |
-
<p>Downloading and installing Jigsaw APK on your Android device is very easy. Just follow these steps:</p>
|
17 |
-
<ol>
|
18 |
-
<li>Go to <a href="(^1^)">this link</a> on your device's browser.</li>
|
19 |
-
<li>Tap on the download button and wait for the file to be downloaded.</li>
|
20 |
-
<li>Once the file is downloaded, tap on it to open it.</li>
|
21 |
-
<li>If you see a warning message that says "Install blocked", go to your device's settings and enable "Unknown sources". This will allow you to install apps from sources other than the Google Play Store.</li>
|
22 |
-
<li>Tap on "Install" and wait for the installation to finish.</li>
|
23 |
-
<li>Tap on "Open" to launch the app and start playing!</li>
|
24 |
-
</ol>
|
25 |
-
<h2>Why You Should Try Jigsaw Puzzles</h2>
|
26 |
-
<h3>The health benefits of doing jigsaw puzzles</h3>
|
27 |
-
<p>Jigsaw puzzles are not only fun but also good for your health. Doing jigsaw puzzles can have several benefits for people of all ages. Studies have shown that it can improve cognition, visual-spatial reasoning, concentration, short-term memory, problem solving, creativity, and productivity. According to a health professional's. <p>Jigsaw puzzles are not only fun but also good for your health. Doing jigsaw puzzles can have several benefits for people of all ages. Studies have shown that it can improve cognition, visual-spatial reasoning, concentration, short-term memory, problem solving, creativity, and productivity. According to a health professional's experience, it can also speed up problem-solving skills, relieve stress and anxiety, improve decision-making skills, enhance soft skills, lengthen attention span, relieve eye strains, and reconnect one's inner self. Puzzle solving can also improve brain, visual, and spatial skills, and stimulate plasticity. Jigsaw puzzles are often linked to higher degrees of intelligence due to the mental skills they involve and reinforce.</p>
|
28 |
-
<h3>The social benefits of doing jigsaw puzzles</h3>
|
29 |
-
<p>Jigsaw puzzles are also a great way to socialize and bond with others. Whether you do them with your family, friends, or online community, jigsaw puzzles can foster a sense of cooperation, communication, and teamwork. You can share tips and tricks, help each other find the right pieces, chat about the puzzle image or other topics, and celebrate your achievements together. Jigsaw puzzles can also help you get away from screens and create a personal connection. Working on a puzzle with someone can also improve your empathy, trust, and understanding of each other. Jigsaw puzzles can also be a source of entertainment and joy for yourself or as a gift for someone else.</p>
|
30 |
-
<h2>How to Solve Jigsaw Puzzles Like an Expert</h2>
|
31 |
-
<h3>Tips and tricks for solving jigsaw puzzles faster and easier</h3>
|
32 |
-
<p>If you want to improve your puzzle-solving skills and speed, here are some tips and tricks that experts use:</p>
|
33 |
-
<p>jigsaw puzzles apk download free<br />
|
34 |
-
jigsaw puzzle games apk download<br />
|
35 |
-
jigsaw puzzles epic apk download<br />
|
36 |
-
jigsaw puzzles hd apk download<br />
|
37 |
-
jigsaw puzzles pro apk download<br />
|
38 |
-
jigsaw puzzles world apk download<br />
|
39 |
-
jigsaw puzzles offline apk download<br />
|
40 |
-
jigsaw puzzles for adults apk download<br />
|
41 |
-
jigsaw puzzles for kids apk download<br />
|
42 |
-
jigsaw puzzles online apk download<br />
|
43 |
-
jigsaw puzzles 1000 pieces apk download<br />
|
44 |
-
jigsaw puzzles 5000 pieces apk download<br />
|
45 |
-
jigsaw puzzles 10000 pieces apk download<br />
|
46 |
-
jigsaw puzzles animals apk download<br />
|
47 |
-
jigsaw puzzles art apk download<br />
|
48 |
-
jigsaw puzzles anime apk download<br />
|
49 |
-
jigsaw puzzles cars apk download<br />
|
50 |
-
jigsaw puzzles cats apk download<br />
|
51 |
-
jigsaw puzzles christmas apk download<br />
|
52 |
-
jigsaw puzzles disney apk download<br />
|
53 |
-
jigsaw puzzles dogs apk download<br />
|
54 |
-
jigsaw puzzles flowers apk download<br />
|
55 |
-
jigsaw puzzles food apk download<br />
|
56 |
-
jigsaw puzzles fantasy apk download<br />
|
57 |
-
jigsaw puzzles halloween apk download<br />
|
58 |
-
jigsaw puzzles horror apk download<br />
|
59 |
-
jigsaw puzzles landscapes apk download<br />
|
60 |
-
jigsaw puzzles nature apk download<br />
|
61 |
-
jigsaw puzzles ocean apk download<br />
|
62 |
-
jigsaw puzzles space apk download<br />
|
63 |
-
magic jigsaw puzzles apk download<br />
|
64 |
-
real jigsaw puzzles apk download<br />
|
65 |
-
best jigsaw puzzles apk download<br />
|
66 |
-
new jigsaw puzzles apk download<br />
|
67 |
-
latest jigsaw puzzles apk download<br />
|
68 |
-
classic jigsaw puzzles apk download<br />
|
69 |
-
3d jigsaw puzzles apk download<br />
|
70 |
-
4d jigsaw puzzles apk download<br />
|
71 |
-
5d jigsaw puzzles apk download<br />
|
72 |
-
6d jigsaw puzzles apk download<br />
|
73 |
-
7d jigsaw puzzles apk download<br />
|
74 |
-
8d jigsaw puzzles apk download<br />
|
75 |
-
9d jigsaw puzzles apk download<br />
|
76 |
-
10d jigsaw puzzles apk download<br />
|
77 |
-
easybrain jigsaw puzzle games - apps on google play[^1^]</p>
|
78 |
-
<ul>
|
79 |
-
<li>Turn all the pieces picture-side-up. This will save you time and make your puzzle-solving more efficient.</li>
|
80 |
-
<li>Sort pieces into groups by color, shape, pattern, or letter. This will help you find the pieces you need faster and organize your workspace better .</li>
|
81 |
-
<li>Assemble the border first. This will define the space you'll be working in and give you a reference point for the rest of the puzzle .</li>
|
82 |
-
<li>Assemble by sorting groups, colors, and patterns. Start with the easy stuff, such as high-contrast areas or distinctive features. Then work on the more challenging parts, such as large areas of similar colors or patterns .</li>
|
83 |
-
<li>Pay attention to piece shapes. Jigsaw puzzle pieces come in six basic shapes, ranging from zero "knobs" and four "holes" to four knobs and zero holes—and all permutations in between. The more experienced you are, the more easily you'll be able to tell at a glance if an individual piece has the slightest chance of fitting where you want it to go.</li>
|
84 |
-
<li>Spread it out. Make sure you have enough space to spread out your pieces and see them clearly. You can also use trays or mats to sort and store your pieces .</li>
|
85 |
-
<li>Take a break. If you get stuck or frustrated, don't give up. Take a break and come back later with fresh eyes. You might see something you missed before or have a new idea .</li>
|
86 |
-
<li>Have fun. Remember that jigsaw puzzles are supposed to be enjoyable and relaxing. Don't stress too much about the time or the difficulty. Just enjoy the process and the outcome.</li>
|
87 |
-
</ul>
|
88 |
-
<h3>Strategies for solving different types of jigsaw puzzles</h3>
|
89 |
-
<p>Depending on the type of jigsaw puzzle you're doing, you might need to adjust your strategy accordingly. Here are some examples of different types of jigsaw puzzles and how to approach them:</p>
|
90 |
-
<ul>
|
91 |
-
<li>Shaped puzzles: These are puzzles that have pieces shaped like animals or other objects instead of regular rectangles or squares. They can be more challenging because they don't have a straight border or uniform pieces. To solve them, you might want to start with the most recognizable shapes first and then work your way around them.</li>
|
92 |
-
<li>Round puzzles: These are puzzles that have a circular shape instead of a rectangular one. They can be tricky because they don't have corners or edges to guide you. To solve them, you might want to start with the center piece first and then work your way outward in concentric circles.</li>
|
93 |
-
<li>Panorama puzzles: These are puzzles that have a wide horizontal image instead of a square or vertical one. They can be difficult because they have more pieces than usual and require more space to assemble. To solve them, you might want to sort the pieces by <p>If you're using Jigsaw APK, you can make your own jigsaw puzzles from your photos or images. Just tap on the "Create" button on the main screen and select an image from your gallery or camera. You can then adjust the number of pieces, the shape of the pieces, and the rotation of the pieces. You can also add a title and a description to your puzzle. Once you're done, you can save your puzzle and share it with others or play it yourself.</p>
|
94 |
-
<h3>How can I display my finished jigsaw puzzles?</h3>
|
95 |
-
<p>If you're proud of your finished jigsaw puzzles and want to display them, you have a few options. You can take a screenshot of your puzzle and set it as your wallpaper or share it on social media. You can also print your puzzle and frame it or glue it to a board. You can also use a puzzle saver or a puzzle glue to preserve your puzzle and hang it on a wall. Alternatively, you can use Jigsaw APK's online mode to showcase your puzzles to other players and see their puzzles as well.</p>
|
96 |
-
<h3>How can I clean and store my jigsaw puzzles?</h3>
|
97 |
-
<p>If you want to keep your jigsaw puzzles in good condition, you should clean and store them properly. To clean your puzzles, you can use a soft cloth or a brush to wipe off any dust or dirt. You can also use a damp cloth or a mild soap to remove any stains or marks. To store your puzzles, you can use a puzzle mat, a puzzle roll, a puzzle box, or a ziplock bag. You should keep your puzzles in a cool, dry, and dark place away from direct sunlight, heat, or moisture.</p> 401be4b1e0<br />
|
98 |
-
<br />
|
99 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/4Taps/SadTalker/src/generate_facerender_batch.py
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import numpy as np
|
3 |
-
from PIL import Image
|
4 |
-
from skimage import io, img_as_float32, transform
|
5 |
-
import torch
|
6 |
-
import scipy.io as scio
|
7 |
-
|
8 |
-
def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path,
                        batch_size, camera_yaw_list=None, camera_pitch_list=None, camera_roll_list=None,
                        expression_scale=1.0, still_mode=False):
    """Assemble the input batch consumed by the face renderer.

    Args:
        coeff_path: .mat file holding the generated per-frame 3DMM coefficients.
        pic_path: source portrait image path.
        first_coeff_path: .mat file holding the source image's 3DMM coefficients.
        audio_path: driving audio path (passed through in the output dict).
        batch_size: number of frames the renderer processes per step.
        camera_yaw_list / camera_pitch_list / camera_roll_list: camera pose
            keyframes in degrees; default to [0] (fixed camera).
        expression_scale: multiplier applied to the expression coefficients.
        still_mode: if True, keep the source head pose for every frame.

    Returns:
        dict of tensors and metadata: source image/semantics, target semantic
        windows, camera pose sequences, frame count, video name and audio path.
    """
    # Fix for mutable default arguments: build fresh lists per call.
    if camera_yaw_list is None:
        camera_yaw_list = [0]
    if camera_pitch_list is None:
        camera_pitch_list = [0]
    if camera_roll_list is None:
        camera_roll_list = [0]

    semantic_radius = 13  # temporal half-window for the semantic features
    video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0]
    txt_path = os.path.splitext(coeff_path)[0]

    data = {}

    # Source image: resize to 256x256, convert to CHW float, repeat per batch slot.
    img1 = Image.open(pic_path)
    source_image = np.array(img1)
    source_image = img_as_float32(source_image)
    source_image = transform.resize(source_image, (256, 256, 3))
    source_image = source_image.transpose((2, 0, 1))
    source_image_ts = torch.FloatTensor(source_image).unsqueeze(0)
    source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1)
    data['source_image'] = source_image_ts

    # Source 3DMM coefficients (first 70 dims), tiled over the temporal window.
    source_semantics_dict = scio.loadmat(first_coeff_path)
    source_semantics = source_semantics_dict['coeff_3dmm'][:1, :70]  # 1 x 70
    source_semantics_new = transform_semantic_1(source_semantics, semantic_radius)
    source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0)
    source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1)
    data['source_semantics'] = source_semantics_ts

    # Target (generated) coefficients; scale the expression part (first 64 dims).
    generated_dict = scio.loadmat(coeff_path)
    generated_3dmm = generated_dict['coeff_3dmm']
    generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale

    if still_mode:
        # Freeze the head pose (dims 64+) to the source pose for every frame.
        generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0)

    # Dump the coefficients next to the .mat file for inspection/debugging.
    with open(txt_path + '.txt', 'w') as f:
        for coeff in generated_3dmm:
            for i in coeff:
                f.write(str(i)[:7] + ' ' + '\t')
            f.write('\n')

    target_semantics_list = []
    frame_num = generated_3dmm.shape[0]
    data['frame_num'] = frame_num
    for frame_idx in range(frame_num):
        target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius)
        target_semantics_list.append(target_semantics)

    # Pad with the last window so the frame count divides evenly by batch_size.
    remainder = frame_num % batch_size
    if remainder != 0:
        for _ in range(batch_size - remainder):
            target_semantics_list.append(target_semantics)

    target_semantics_np = np.array(target_semantics_list)  # frames x 70 x (2*radius+1)
    target_semantics_np = target_semantics_np.reshape(
        batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1])
    data['target_semantics_list'] = torch.FloatTensor(target_semantics_np)
    data['video_name'] = video_name
    data['audio_path'] = audio_path

    # Per-frame camera pose sequences interpolated from the keyframe lists.
    yaw_c_seq = gen_camera_pose(camera_yaw_list, frame_num, batch_size)
    pitch_c_seq = gen_camera_pose(camera_pitch_list, frame_num, batch_size)
    roll_c_seq = gen_camera_pose(camera_roll_list, frame_num, batch_size)

    data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq)
    data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq)
    data['roll_c_seq'] = torch.FloatTensor(roll_c_seq)
    return data
|
74 |
-
|
75 |
-
def transform_semantic_1(semantic, semantic_radius):
    """Tile a single-frame coefficient row across a full temporal window.

    `semantic` is a (1, C) array; the result is (C, 2*semantic_radius + 1)
    where every column is a copy of the single input frame.
    """
    window_size = semantic_radius * 2 + 1
    stacked = np.concatenate([semantic] * window_size, 0)
    return stacked.transpose(1, 0)
|
79 |
-
|
80 |
-
def transform_semantic_target(coeff_3dmm, frame_index, semantic_radius):
    """Extract a clamped temporal window of coefficients around one frame.

    Frame indices outside [0, num_frames) are clamped to the valid range,
    so windows near the clip boundaries repeat the edge frames.
    Returns an array of shape (C, 2*semantic_radius + 1).
    """
    num_frames = coeff_3dmm.shape[0]
    lo = frame_index - semantic_radius
    hi = frame_index + semantic_radius + 1
    clamped = [min(max(t, 0), num_frames - 1) for t in range(lo, hi)]
    window = coeff_3dmm[clamped, :]
    return window.transpose(1, 0)
|
86 |
-
|
87 |
-
def gen_camera_pose(camera_degree_list, frame_num, batch_size):
    """Build a per-frame camera pose sequence from degree keyframes.

    A single keyframe yields a constant pose; multiple keyframes are linearly
    interpolated so the total traversal covers `frame_num` frames. The result
    is padded (repeating the last value) to a multiple of `batch_size` and
    reshaped to (batch_size, -1).

    Fixes over the original: removed leftover debug `print`s, guarded the
    divisions by zero when `frame_num == 1` or when two consecutive
    keyframes are equal, and guarded the empty-list edge case.
    """
    new_degree_list = []
    if len(camera_degree_list) == 1:
        # Constant pose: repeat the single keyframe for every frame.
        new_degree_list = [camera_degree_list[0]] * frame_num
        remainder = frame_num % batch_size
        if remainder != 0:
            new_degree_list.extend([new_degree_list[-1]] * (batch_size - remainder))
        return np.array(new_degree_list).reshape(batch_size, -1)

    # Total angular distance across all keyframe segments.
    degree_sum = 0.
    for i, degree in enumerate(camera_degree_list[1:]):
        degree_sum += abs(degree - camera_degree_list[i])

    degree_per_frame = degree_sum / max(frame_num - 1, 1)
    for i, degree in enumerate(camera_degree_list[1:]):
        degree_last = camera_degree_list[i]
        if degree == degree_last:
            # Zero-length segment: original code divided by zero here.
            continue
        degree_step = degree_per_frame * abs(degree - degree_last) / (degree - degree_last)
        new_degree_list = new_degree_list + list(np.arange(degree_last, degree, degree_step))

    if not new_degree_list:
        # All keyframes were identical: fall back to a constant pose.
        new_degree_list = [camera_degree_list[0]]

    # Trim or pad to exactly frame_num entries.
    if len(new_degree_list) > frame_num:
        new_degree_list = new_degree_list[:frame_num]
    elif len(new_degree_list) < frame_num:
        new_degree_list.extend([new_degree_list[-1]] * (frame_num - len(new_degree_list)))

    # Pad to a multiple of batch_size by repeating the last pose.
    remainder = frame_num % batch_size
    if remainder != 0:
        new_degree_list.extend([new_degree_list[-1]] * (batch_size - remainder))
    return np.array(new_degree_list).reshape(batch_size, -1)
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/demucs/repitch.py
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
import io
|
8 |
-
import random
|
9 |
-
import subprocess as sp
|
10 |
-
import tempfile
|
11 |
-
|
12 |
-
import numpy as np
|
13 |
-
import torch
|
14 |
-
from scipy.io import wavfile
|
15 |
-
|
16 |
-
|
17 |
-
def i16_pcm(wav):
    """Convert a float waveform in [-1, 1] to 16-bit integer PCM.

    Tensors already in int16 are returned unchanged; otherwise the signal
    is scaled by 2**15 and clamped to the int16 range.
    """
    if wav.dtype == np.int16:
        return wav
    scaled = wav * 2 ** 15
    return scaled.clamp_(-2 ** 15, 2 ** 15 - 1).short()
|
21 |
-
|
22 |
-
|
23 |
-
def f32_pcm(wav):
    """Convert 16-bit integer PCM to a float waveform in [-1, 1].

    Floating-point tensors are returned unchanged.

    Fix: the original compared against `np.float`, an alias removed in
    NumPy 1.24, so this function raised AttributeError on modern NumPy.
    The check now uses torch's own floating-point predicate (the input is
    always a torch tensor here — see `repitch`).
    """
    if torch.is_floating_point(wav):
        return wav
    return wav.float() / 2 ** 15
|
27 |
-
|
28 |
-
|
29 |
-
class RepitchedWrapper:
    """
    Wrap a dataset to apply online change of pitch / tempo.

    With probability `proba`, every stream of an item is run through
    `repitch` with a random pitch/tempo delta; all outputs are truncated
    to a common worst-case length so batch shapes stay consistent.
    """

    def __init__(self, dataset, proba=0.2, max_pitch=2, max_tempo=12, tempo_std=5, vocals=None):
        """
        Args:
            dataset: wrapped dataset; items are tensors whose last dim is time.
            proba: probability of applying the augmentation to an item.
            max_pitch: max absolute pitch shift, in semitones.
            max_tempo: max absolute tempo delta, in percent.
            tempo_std: std-dev of the Gaussian tempo delta.
            vocals: stream indices treated as voice (defaults to [3]).
                Fix: the original used the mutable default `vocals=[3]`,
                shared across all instances; a None sentinel avoids that.
        """
        self.dataset = dataset
        self.proba = proba
        self.max_pitch = max_pitch
        self.max_tempo = max_tempo
        self.tempo_std = tempo_std
        self.vocals = [3] if vocals is None else vocals

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        streams = self.dataset[index]
        in_length = streams.shape[-1]
        # Worst-case slowdown fixes the common output length.
        out_length = int((1 - 0.01 * self.max_tempo) * in_length)

        if random.random() < self.proba:
            delta_pitch = random.randint(-self.max_pitch, self.max_pitch)
            delta_tempo = random.gauss(0, self.tempo_std)
            delta_tempo = min(max(-self.max_tempo, delta_tempo), self.max_tempo)
            outs = []
            for idx, stream in enumerate(streams):
                stream = repitch(
                    stream,
                    delta_pitch,
                    delta_tempo,
                    voice=idx in self.vocals)
                outs.append(stream[:, :out_length])
            streams = torch.stack(outs)
        else:
            streams = streams[..., :out_length]
        return streams
|
65 |
-
|
66 |
-
|
67 |
-
def repitch(wav, pitch, tempo, voice=False, quick=False, samplerate=44100):
    """
    tempo is a relative delta in percentage, so tempo=10 means tempo at 110%!
    pitch is in semi tones.
    Requires `soundstretch` to be installed, see
    https://www.surina.net/soundtouch/soundstretch.html
    """
    # soundstretch reads from stdin and writes the result to a temp wav file.
    outfile = tempfile.NamedTemporaryFile(suffix=".wav")
    stdin_buf = io.BytesIO()
    wavfile.write(stdin_buf, samplerate, i16_pcm(wav).t().numpy())

    cmd = [
        "soundstretch",
        "stdin",
        outfile.name,
        f"-pitch={pitch}",
        f"-tempo={tempo:.6f}",
    ]
    if quick:
        cmd.append("-quick")
    if voice:
        cmd.append("-speech")

    try:
        sp.run(cmd, capture_output=True, input=stdin_buf.getvalue(), check=True)
    except sp.CalledProcessError as error:
        raise RuntimeError(f"Could not change bpm because {error.stderr.decode('utf-8')}")

    sr, out_wav = wavfile.read(outfile.name)
    # Copy so the tensor owns its memory once the temp file is released.
    out = f32_pcm(torch.from_numpy(out_wav.copy()).t())
    assert sr == samplerate
    return out
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/get-pip.py
DELETED
The diff for this file is too large to render.
See raw diff
|
|
spaces/801artistry/RVC801/infer/lib/train/data_utils.py
DELETED
@@ -1,517 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import traceback
|
3 |
-
import logging
|
4 |
-
|
5 |
-
logger = logging.getLogger(__name__)
|
6 |
-
|
7 |
-
import numpy as np
|
8 |
-
import torch
|
9 |
-
import torch.utils.data
|
10 |
-
|
11 |
-
from infer.lib.train.mel_processing import spectrogram_torch
|
12 |
-
from infer.lib.train.utils import load_filepaths_and_text, load_wav_to_torch
|
13 |
-
|
14 |
-
|
15 |
-
class TextAudioLoaderMultiNSFsid(torch.utils.data.Dataset):
    """Dataset yielding (spec, wav, phone, pitch, pitchf, sid) tuples.

    1) loads audio / feature-path tuples from a filelist
    2) loads phone features plus pitch (coarse + f0) labels
    3) computes spectrograms from audio files, caching them on disk.

    Fixes over the original: narrowed a bare ``except:`` to ``Exception``,
    replaced deprecated ``logger.warn`` with ``logger.warning``, merged the
    duplicated spectrogram-computation branches, and removed a duplicate
    ``sampling_rate`` assignment.
    """

    def __init__(self, audiopaths_and_text, hparams):
        self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
        self.max_wav_value = hparams.max_wav_value
        self.sampling_rate = hparams.sampling_rate
        self.filter_length = hparams.filter_length
        self.hop_length = hparams.hop_length
        self.win_length = hparams.win_length
        self.min_text_len = getattr(hparams, "min_text_len", 1)
        self.max_text_len = getattr(hparams, "max_text_len", 5000)
        self._filter()

    def _filter(self):
        """Filter entries by text length & store approximate spec lengths for bucketing."""
        # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
        # spec_length = wav_length // hop_length
        audiopaths_and_text_new = []
        lengths = []
        for audiopath, text, pitch, pitchf, dv in self.audiopaths_and_text:
            if self.min_text_len <= len(text) <= self.max_text_len:
                audiopaths_and_text_new.append([audiopath, text, pitch, pitchf, dv])
                lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))
        self.audiopaths_and_text = audiopaths_and_text_new
        self.lengths = lengths

    def get_sid(self, sid):
        """Speaker id string/int -> 1-element LongTensor."""
        return torch.LongTensor([int(sid)])

    def get_audio_text_pair(self, audiopath_and_text):
        """Load one example and align feature / spectrogram / waveform lengths."""
        file = audiopath_and_text[0]
        phone = audiopath_and_text[1]
        pitch = audiopath_and_text[2]
        pitchf = audiopath_and_text[3]
        dv = audiopath_and_text[4]

        phone, pitch, pitchf = self.get_labels(phone, pitch, pitchf)
        spec, wav = self.get_audio(file)
        dv = self.get_sid(dv)

        len_phone = phone.size()[0]
        len_spec = spec.size()[-1]
        if len_phone != len_spec:
            # Truncate everything to the shorter of the two timelines.
            len_min = min(len_phone, len_spec)
            len_wav = len_min * self.hop_length

            spec = spec[:, :len_min]
            wav = wav[:, :len_wav]

            phone = phone[:len_min, :]
            pitch = pitch[:len_min]
            pitchf = pitchf[:len_min]

        return (spec, wav, phone, pitch, pitchf, dv)

    def get_labels(self, phone, pitch, pitchf):
        """Load .npy features and pitch labels; upsample features x2, clip to 900 frames."""
        phone = np.load(phone)
        phone = np.repeat(phone, 2, axis=0)
        pitch = np.load(pitch)
        pitchf = np.load(pitchf)
        n_num = min(phone.shape[0], 900)  # DistributedBucketSampler
        phone = phone[:n_num, :]
        pitch = pitch[:n_num]
        pitchf = pitchf[:n_num]
        phone = torch.FloatTensor(phone)
        pitch = torch.LongTensor(pitch)
        pitchf = torch.FloatTensor(pitchf)
        return phone, pitch, pitchf

    def get_audio(self, filename):
        """Load audio; return (spectrogram, waveform). Caches the spec as .spec.pt."""
        audio, sampling_rate = load_wav_to_torch(filename)
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                "{} SR doesn't match target {} SR".format(
                    sampling_rate, self.sampling_rate
                )
            )
        audio_norm = audio
        audio_norm = audio_norm.unsqueeze(0)
        spec_filename = filename.replace(".wav", ".spec.pt")
        spec = None
        if os.path.exists(spec_filename):
            try:
                spec = torch.load(spec_filename)
            except Exception:
                # Corrupt/incompatible cache file: log and fall through to recompute.
                logger.warning("%s %s", spec_filename, traceback.format_exc())
        if spec is None:
            # Cache miss or failed load: compute and (re)write the cache.
            spec = spectrogram_torch(
                audio_norm,
                self.filter_length,
                self.sampling_rate,
                self.hop_length,
                self.win_length,
                center=False,
            )
            spec = torch.squeeze(spec, 0)
            torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
        return spec, audio_norm

    def __getitem__(self, index):
        return self.get_audio_text_pair(self.audiopaths_and_text[index])

    def __len__(self):
        return len(self.audiopaths_and_text)
|
145 |
-
|
146 |
-
|
147 |
-
class TextAudioCollateMultiNSFsid:
    """Zero-pads model inputs and targets"""

    def __init__(self, return_ids=False):
        self.return_ids = return_ids

    def __call__(self, batch):
        """Collate a training batch of (spec, wav, phone, pitch, pitchf, sid)
        rows, zero-padded to the per-batch maxima.

        Rows are written out from longest to shortest spectrogram.
        """
        _, order = torch.sort(
            torch.LongTensor([item[0].size(1) for item in batch]), dim=0, descending=True
        )

        n = len(batch)
        spec_max = max(item[0].size(1) for item in batch)
        wave_max = max(item[1].size(1) for item in batch)
        phone_max = max(item[2].size(0) for item in batch)

        spec_lengths = torch.LongTensor(n)
        wave_lengths = torch.LongTensor(n)
        phone_lengths = torch.LongTensor(n)
        spec_padded = torch.zeros(n, batch[0][0].size(0), spec_max)
        wave_padded = torch.zeros(n, 1, wave_max)
        phone_padded = torch.zeros(n, phone_max, batch[0][2].shape[1])
        pitch_padded = torch.zeros(n, phone_max, dtype=torch.long)
        pitchf_padded = torch.zeros(n, phone_max)
        sid = torch.LongTensor(n)

        for pos, src in enumerate(order):
            spec, wave, phone, pitch, pitchf, spk = batch[src]

            spec_padded[pos, :, : spec.size(1)] = spec
            spec_lengths[pos] = spec.size(1)

            wave_padded[pos, :, : wave.size(1)] = wave
            wave_lengths[pos] = wave.size(1)

            phone_padded[pos, : phone.size(0), :] = phone
            phone_lengths[pos] = phone.size(0)

            pitch_padded[pos, : pitch.size(0)] = pitch
            pitchf_padded[pos, : pitchf.size(0)] = pitchf

            sid[pos] = spk

        return (
            phone_padded,
            phone_lengths,
            pitch_padded,
            pitchf_padded,
            spec_padded,
            spec_lengths,
            wave_padded,
            wave_lengths,
            sid,
        )
|
221 |
-
|
222 |
-
|
223 |
-
class TextAudioLoader(torch.utils.data.Dataset):
    """Dataset yielding (spec, wav, phone, sid) tuples (no-pitch variant).

    1) loads audio / feature-path tuples from a filelist
    2) loads phone features
    3) computes spectrograms from audio files, caching them on disk.

    Fixes over the original: narrowed a bare ``except:`` to ``Exception``,
    replaced deprecated ``logger.warn`` with ``logger.warning``, merged the
    duplicated spectrogram-computation branches, and removed a duplicate
    ``sampling_rate`` assignment.
    """

    def __init__(self, audiopaths_and_text, hparams):
        self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
        self.max_wav_value = hparams.max_wav_value
        self.sampling_rate = hparams.sampling_rate
        self.filter_length = hparams.filter_length
        self.hop_length = hparams.hop_length
        self.win_length = hparams.win_length
        self.min_text_len = getattr(hparams, "min_text_len", 1)
        self.max_text_len = getattr(hparams, "max_text_len", 5000)
        self._filter()

    def _filter(self):
        """Filter entries by text length & store approximate spec lengths for bucketing."""
        # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
        # spec_length = wav_length // hop_length
        audiopaths_and_text_new = []
        lengths = []
        for audiopath, text, dv in self.audiopaths_and_text:
            if self.min_text_len <= len(text) <= self.max_text_len:
                audiopaths_and_text_new.append([audiopath, text, dv])
                lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))
        self.audiopaths_and_text = audiopaths_and_text_new
        self.lengths = lengths

    def get_sid(self, sid):
        """Speaker id string/int -> 1-element LongTensor."""
        return torch.LongTensor([int(sid)])

    def get_audio_text_pair(self, audiopath_and_text):
        """Load one example and align feature / spectrogram / waveform lengths."""
        file = audiopath_and_text[0]
        phone = audiopath_and_text[1]
        dv = audiopath_and_text[2]

        phone = self.get_labels(phone)
        spec, wav = self.get_audio(file)
        dv = self.get_sid(dv)

        len_phone = phone.size()[0]
        len_spec = spec.size()[-1]
        if len_phone != len_spec:
            # Truncate everything to the shorter of the two timelines.
            len_min = min(len_phone, len_spec)
            len_wav = len_min * self.hop_length
            spec = spec[:, :len_min]
            wav = wav[:, :len_wav]
            phone = phone[:len_min, :]
        return (spec, wav, phone, dv)

    def get_labels(self, phone):
        """Load .npy features; upsample x2 in time, clip to 900 frames."""
        phone = np.load(phone)
        phone = np.repeat(phone, 2, axis=0)
        n_num = min(phone.shape[0], 900)  # DistributedBucketSampler
        phone = phone[:n_num, :]
        phone = torch.FloatTensor(phone)
        return phone

    def get_audio(self, filename):
        """Load audio; return (spectrogram, waveform). Caches the spec as .spec.pt."""
        audio, sampling_rate = load_wav_to_torch(filename)
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                "{} SR doesn't match target {} SR".format(
                    sampling_rate, self.sampling_rate
                )
            )
        audio_norm = audio
        audio_norm = audio_norm.unsqueeze(0)
        spec_filename = filename.replace(".wav", ".spec.pt")
        spec = None
        if os.path.exists(spec_filename):
            try:
                spec = torch.load(spec_filename)
            except Exception:
                # Corrupt/incompatible cache file: log and fall through to recompute.
                logger.warning("%s %s", spec_filename, traceback.format_exc())
        if spec is None:
            # Cache miss or failed load: compute and (re)write the cache.
            spec = spectrogram_torch(
                audio_norm,
                self.filter_length,
                self.sampling_rate,
                self.hop_length,
                self.win_length,
                center=False,
            )
            spec = torch.squeeze(spec, 0)
            torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
        return spec, audio_norm

    def __getitem__(self, index):
        return self.get_audio_text_pair(self.audiopaths_and_text[index])

    def __len__(self):
        return len(self.audiopaths_and_text)
|
337 |
-
|
338 |
-
|
339 |
-
class TextAudioCollate:
    """Zero-pads model inputs and targets"""

    def __init__(self, return_ids=False):
        self.return_ids = return_ids

    def __call__(self, batch):
        """Collate a training batch of (spec, wav, phone, sid) rows,
        zero-padded to the per-batch maxima.

        Rows are written out from longest to shortest spectrogram.
        """
        _, order = torch.sort(
            torch.LongTensor([item[0].size(1) for item in batch]), dim=0, descending=True
        )

        n = len(batch)
        spec_max = max(item[0].size(1) for item in batch)
        wave_max = max(item[1].size(1) for item in batch)
        phone_max = max(item[2].size(0) for item in batch)

        spec_lengths = torch.LongTensor(n)
        wave_lengths = torch.LongTensor(n)
        phone_lengths = torch.LongTensor(n)
        spec_padded = torch.zeros(n, batch[0][0].size(0), spec_max)
        wave_padded = torch.zeros(n, 1, wave_max)
        phone_padded = torch.zeros(n, phone_max, batch[0][2].shape[1])
        sid = torch.LongTensor(n)

        for pos, src in enumerate(order):
            spec, wave, phone, spk = batch[src]

            spec_padded[pos, :, : spec.size(1)] = spec
            spec_lengths[pos] = spec.size(1)

            wave_padded[pos, :, : wave.size(1)] = wave
            wave_lengths[pos] = wave.size(1)

            phone_padded[pos, : phone.size(0), :] = phone
            phone_lengths[pos] = phone.size(0)

            sid[pos] = spk

        return (
            phone_padded,
            phone_lengths,
            spec_padded,
            spec_lengths,
            wave_padded,
            wave_lengths,
            sid,
        )
|
399 |
-
|
400 |
-
|
401 |
-
class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
    """
    Maintain similar input lengths in a batch.
    Length groups are specified by boundaries.
    Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.

    It removes samples which are not included in the boundaries.
    Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
    """

    def __init__(
        self,
        dataset,
        batch_size,
        boundaries,
        num_replicas=None,
        rank=None,
        shuffle=True,
    ):
        # NOTE(review): `dataset` must expose a `lengths` list (see the loader
        # classes in this file); the base DistributedSampler provides
        # self.num_replicas / self.rank / self.epoch.
        super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        self.lengths = dataset.lengths
        self.batch_size = batch_size
        self.boundaries = boundaries

        # Buckets are padded so each is evenly divisible across replicas.
        self.buckets, self.num_samples_per_bucket = self._create_buckets()
        self.total_size = sum(self.num_samples_per_bucket)
        self.num_samples = self.total_size // self.num_replicas

    def _create_buckets(self):
        """Assign each sample index to a length bucket and compute padded bucket sizes."""
        buckets = [[] for _ in range(len(self.boundaries) - 1)]
        for i in range(len(self.lengths)):
            length = self.lengths[i]
            idx_bucket = self._bisect(length)
            if idx_bucket != -1:  # -1 means the length falls outside all boundaries
                buckets[idx_bucket].append(i)

        # Drop empty buckets; iterate in reverse so pop() doesn't shift indices.
        # NOTE: this mutates self.boundaries in place to stay aligned with buckets.
        for i in range(len(buckets) - 1, -1, -1):  #
            if len(buckets[i]) == 0:
                buckets.pop(i)
                self.boundaries.pop(i + 1)

        # Pad each bucket up to a multiple of (num_replicas * batch_size).
        num_samples_per_bucket = []
        for i in range(len(buckets)):
            len_bucket = len(buckets[i])
            total_batch_size = self.num_replicas * self.batch_size
            rem = (
                total_batch_size - (len_bucket % total_batch_size)
            ) % total_batch_size
            num_samples_per_bucket.append(len_bucket + rem)
        return buckets, num_samples_per_bucket

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)

        # Per-bucket permutation (or identity order when not shuffling).
        indices = []
        if self.shuffle:
            for bucket in self.buckets:
                indices.append(torch.randperm(len(bucket), generator=g).tolist())
        else:
            for bucket in self.buckets:
                indices.append(list(range(len(bucket))))

        batches = []
        for i in range(len(self.buckets)):
            bucket = self.buckets[i]
            len_bucket = len(bucket)
            ids_bucket = indices[i]
            num_samples_bucket = self.num_samples_per_bucket[i]

            # add extra samples to make it evenly divisible
            # (repeat full passes of the bucket plus a prefix for the remainder)
            rem = num_samples_bucket - len_bucket
            ids_bucket = (
                ids_bucket
                + ids_bucket * (rem // len_bucket)
                + ids_bucket[: (rem % len_bucket)]
            )

            # subsample: this replica takes every num_replicas-th index
            ids_bucket = ids_bucket[self.rank :: self.num_replicas]

            # batching: translate bucket-local positions back to dataset indices
            for j in range(len(ids_bucket) // self.batch_size):
                batch = [
                    bucket[idx]
                    for idx in ids_bucket[
                        j * self.batch_size : (j + 1) * self.batch_size
                    ]
                ]
                batches.append(batch)

        if self.shuffle:
            # Shuffle batch order (same generator keeps it epoch-deterministic).
            batch_ids = torch.randperm(len(batches), generator=g).tolist()
            batches = [batches[i] for i in batch_ids]
        self.batches = batches

        # Padding above guarantees every replica yields exactly num_samples items.
        assert len(self.batches) * self.batch_size == self.num_samples
        return iter(self.batches)

    def _bisect(self, x, lo=0, hi=None):
        """Binary-search the bucket index with boundaries[mid] < x <= boundaries[mid+1]; -1 if none."""
        if hi is None:
            hi = len(self.boundaries) - 1

        if hi > lo:
            mid = (hi + lo) // 2
            if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
                return mid
            elif x <= self.boundaries[mid]:
                return self._bisect(x, lo, mid)
            else:
                return self._bisect(x, mid + 1, hi)
        else:
            return -1

    def __len__(self):
        # Number of batches this replica yields per epoch.
        return self.num_samples // self.batch_size
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/StyleGANEX/scripts/style_mixing.py
DELETED
@@ -1,101 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
from argparse import Namespace
|
3 |
-
|
4 |
-
from tqdm import tqdm
|
5 |
-
import numpy as np
|
6 |
-
from PIL import Image
|
7 |
-
import torch
|
8 |
-
from torch.utils.data import DataLoader
|
9 |
-
import sys
|
10 |
-
|
11 |
-
sys.path.append(".")
|
12 |
-
sys.path.append("..")
|
13 |
-
|
14 |
-
from configs import data_configs
|
15 |
-
from datasets.inference_dataset import InferenceDataset
|
16 |
-
from utils.common import tensor2im, log_input_image
|
17 |
-
from options.test_options import TestOptions
|
18 |
-
from models.psp import pSp
|
19 |
-
|
20 |
-
|
21 |
-
def run():
|
22 |
-
test_opts = TestOptions().parse()
|
23 |
-
|
24 |
-
if test_opts.resize_factors is not None:
|
25 |
-
factors = test_opts.resize_factors.split(',')
|
26 |
-
assert len(factors) == 1, "When running inference, please provide a single downsampling factor!"
|
27 |
-
mixed_path_results = os.path.join(test_opts.exp_dir, 'style_mixing',
|
28 |
-
'downsampling_{}'.format(test_opts.resize_factors))
|
29 |
-
else:
|
30 |
-
mixed_path_results = os.path.join(test_opts.exp_dir, 'style_mixing')
|
31 |
-
os.makedirs(mixed_path_results, exist_ok=True)
|
32 |
-
|
33 |
-
# update test options with options used during training
|
34 |
-
ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
|
35 |
-
opts = ckpt['opts']
|
36 |
-
opts.update(vars(test_opts))
|
37 |
-
if 'learn_in_w' not in opts:
|
38 |
-
opts['learn_in_w'] = False
|
39 |
-
if 'output_size' not in opts:
|
40 |
-
opts['output_size'] = 1024
|
41 |
-
opts = Namespace(**opts)
|
42 |
-
|
43 |
-
net = pSp(opts)
|
44 |
-
net.eval()
|
45 |
-
net.cuda()
|
46 |
-
|
47 |
-
print('Loading dataset for {}'.format(opts.dataset_type))
|
48 |
-
dataset_args = data_configs.DATASETS[opts.dataset_type]
|
49 |
-
transforms_dict = dataset_args['transforms'](opts).get_transforms()
|
50 |
-
dataset = InferenceDataset(root=opts.data_path,
|
51 |
-
transform=transforms_dict['transform_inference'],
|
52 |
-
opts=opts)
|
53 |
-
dataloader = DataLoader(dataset,
|
54 |
-
batch_size=opts.test_batch_size,
|
55 |
-
shuffle=False,
|
56 |
-
num_workers=int(opts.test_workers),
|
57 |
-
drop_last=True)
|
58 |
-
|
59 |
-
latent_mask = [int(l) for l in opts.latent_mask.split(",")]
|
60 |
-
if opts.n_images is None:
|
61 |
-
opts.n_images = len(dataset)
|
62 |
-
|
63 |
-
global_i = 0
|
64 |
-
for input_batch in tqdm(dataloader):
|
65 |
-
if global_i >= opts.n_images:
|
66 |
-
break
|
67 |
-
with torch.no_grad():
|
68 |
-
input_batch = input_batch.cuda()
|
69 |
-
for image_idx, input_image in enumerate(input_batch):
|
70 |
-
# generate random vectors to inject into input image
|
71 |
-
vecs_to_inject = np.random.randn(opts.n_outputs_to_generate, 512).astype('float32')
|
72 |
-
multi_modal_outputs = []
|
73 |
-
for vec_to_inject in vecs_to_inject:
|
74 |
-
cur_vec = torch.from_numpy(vec_to_inject).unsqueeze(0).to("cuda")
|
75 |
-
# get latent vector to inject into our input image
|
76 |
-
_, latent_to_inject = net(cur_vec,
|
77 |
-
input_code=True,
|
78 |
-
return_latents=True)
|
79 |
-
# get output image with injected style vector
|
80 |
-
res = net(input_image.unsqueeze(0).to("cuda").float(),
|
81 |
-
latent_mask=latent_mask,
|
82 |
-
inject_latent=latent_to_inject,
|
83 |
-
alpha=opts.mix_alpha,
|
84 |
-
resize=opts.resize_outputs)
|
85 |
-
multi_modal_outputs.append(res[0])
|
86 |
-
|
87 |
-
# visualize multi modal outputs
|
88 |
-
input_im_path = dataset.paths[global_i]
|
89 |
-
image = input_batch[image_idx]
|
90 |
-
input_image = log_input_image(image, opts)
|
91 |
-
resize_amount = (256, 256) if opts.resize_outputs else (opts.output_size, opts.output_size)
|
92 |
-
res = np.array(input_image.resize(resize_amount))
|
93 |
-
for output in multi_modal_outputs:
|
94 |
-
output = tensor2im(output)
|
95 |
-
res = np.concatenate([res, np.array(output.resize(resize_amount))], axis=1)
|
96 |
-
Image.fromarray(res).save(os.path.join(mixed_path_results, os.path.basename(input_im_path)))
|
97 |
-
global_i += 1
|
98 |
-
|
99 |
-
|
100 |
-
if __name__ == '__main__':
|
101 |
-
run()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/modules.py
DELETED
@@ -1,314 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
from functools import partial
|
4 |
-
|
5 |
-
from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test
|
6 |
-
from torch.utils.checkpoint import checkpoint
|
7 |
-
from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel, AutoTokenizer
|
8 |
-
from importlib_resources import files
|
9 |
-
from ldm.modules.encoders.CLAP.utils import read_config_as_args
|
10 |
-
from ldm.modules.encoders.CLAP.clap import TextEncoder
|
11 |
-
from ldm.util import default, count_params
|
12 |
-
|
13 |
-
|
14 |
-
class AbstractEncoder(nn.Module):
|
15 |
-
def __init__(self):
|
16 |
-
super().__init__()
|
17 |
-
|
18 |
-
def encode(self, *args, **kwargs):
|
19 |
-
raise NotImplementedError
|
20 |
-
|
21 |
-
|
22 |
-
class ClassEmbedder(nn.Module):
|
23 |
-
def __init__(self, embed_dim, n_classes=1000, key='class'):
|
24 |
-
super().__init__()
|
25 |
-
self.key = key
|
26 |
-
self.embedding = nn.Embedding(n_classes, embed_dim)
|
27 |
-
|
28 |
-
def forward(self, batch, key=None):
|
29 |
-
if key is None:
|
30 |
-
key = self.key
|
31 |
-
# this is for use in crossattn
|
32 |
-
c = batch[key][:, None]# (bsz,1)
|
33 |
-
c = self.embedding(c)
|
34 |
-
return c
|
35 |
-
|
36 |
-
|
37 |
-
class TransformerEmbedder(AbstractEncoder):
|
38 |
-
"""Some transformer encoder layers"""
|
39 |
-
def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
|
40 |
-
super().__init__()
|
41 |
-
self.device = device
|
42 |
-
self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
|
43 |
-
attn_layers=Encoder(dim=n_embed, depth=n_layer))
|
44 |
-
|
45 |
-
def forward(self, tokens):
|
46 |
-
tokens = tokens.to(self.device) # meh
|
47 |
-
z = self.transformer(tokens, return_embeddings=True)
|
48 |
-
return z
|
49 |
-
|
50 |
-
def encode(self, x):
|
51 |
-
return self(x)
|
52 |
-
|
53 |
-
|
54 |
-
class BERTTokenizer(AbstractEncoder):
|
55 |
-
""" Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
|
56 |
-
def __init__(self, device="cuda", vq_interface=True, max_length=77):
|
57 |
-
super().__init__()
|
58 |
-
from transformers import BertTokenizerFast # TODO: add to reuquirements
|
59 |
-
self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
|
60 |
-
self.device = device
|
61 |
-
self.vq_interface = vq_interface
|
62 |
-
self.max_length = max_length
|
63 |
-
|
64 |
-
def forward(self, text):
|
65 |
-
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
|
66 |
-
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
|
67 |
-
tokens = batch_encoding["input_ids"].to(self.device)
|
68 |
-
return tokens
|
69 |
-
|
70 |
-
@torch.no_grad()
|
71 |
-
def encode(self, text):
|
72 |
-
tokens = self(text)
|
73 |
-
if not self.vq_interface:
|
74 |
-
return tokens
|
75 |
-
return None, None, [None, None, tokens]
|
76 |
-
|
77 |
-
def decode(self, text):
|
78 |
-
return text
|
79 |
-
|
80 |
-
|
81 |
-
class BERTEmbedder(AbstractEncoder):# 这里不是用的pretrained bert,是用的transformers的BertTokenizer加自定义的TransformerWrapper
|
82 |
-
"""Uses the BERT tokenizr model and add some transformer encoder layers"""
|
83 |
-
def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
|
84 |
-
device="cuda",use_tokenizer=True, embedding_dropout=0.0):
|
85 |
-
super().__init__()
|
86 |
-
self.use_tknz_fn = use_tokenizer
|
87 |
-
if self.use_tknz_fn:
|
88 |
-
self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
|
89 |
-
self.device = device
|
90 |
-
self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
|
91 |
-
attn_layers=Encoder(dim=n_embed, depth=n_layer),
|
92 |
-
emb_dropout=embedding_dropout)
|
93 |
-
|
94 |
-
def forward(self, text):
|
95 |
-
if self.use_tknz_fn:
|
96 |
-
tokens = self.tknz_fn(text)#.to(self.device)
|
97 |
-
else:
|
98 |
-
tokens = text
|
99 |
-
z = self.transformer(tokens, return_embeddings=True)
|
100 |
-
return z
|
101 |
-
|
102 |
-
def encode(self, text):
|
103 |
-
# output of length 77
|
104 |
-
return self(text)
|
105 |
-
|
106 |
-
|
107 |
-
class SpatialRescaler(nn.Module):
|
108 |
-
def __init__(self,
|
109 |
-
n_stages=1,
|
110 |
-
method='bilinear',
|
111 |
-
multiplier=0.5,
|
112 |
-
in_channels=3,
|
113 |
-
out_channels=None,
|
114 |
-
bias=False):
|
115 |
-
super().__init__()
|
116 |
-
self.n_stages = n_stages
|
117 |
-
assert self.n_stages >= 0
|
118 |
-
assert method in ['nearest','linear','bilinear','trilinear','bicubic','area']
|
119 |
-
self.multiplier = multiplier
|
120 |
-
self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
|
121 |
-
self.remap_output = out_channels is not None
|
122 |
-
if self.remap_output:
|
123 |
-
print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
|
124 |
-
self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias)
|
125 |
-
|
126 |
-
def forward(self,x):
|
127 |
-
for stage in range(self.n_stages):
|
128 |
-
x = self.interpolator(x, scale_factor=self.multiplier)
|
129 |
-
|
130 |
-
|
131 |
-
if self.remap_output:
|
132 |
-
x = self.channel_mapper(x)
|
133 |
-
return x
|
134 |
-
|
135 |
-
def encode(self, x):
|
136 |
-
return self(x)
|
137 |
-
|
138 |
-
def disabled_train(self, mode=True):
|
139 |
-
"""Overwrite model.train with this function to make sure train/eval mode
|
140 |
-
does not change anymore."""
|
141 |
-
return self
|
142 |
-
|
143 |
-
class FrozenT5Embedder(AbstractEncoder):
|
144 |
-
"""Uses the T5 transformer encoder for text"""
|
145 |
-
def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
|
146 |
-
super().__init__()
|
147 |
-
self.tokenizer = T5Tokenizer.from_pretrained(version)
|
148 |
-
self.transformer = T5EncoderModel.from_pretrained(version)
|
149 |
-
self.device = device
|
150 |
-
self.max_length = max_length # TODO: typical value?
|
151 |
-
if freeze:
|
152 |
-
self.freeze()
|
153 |
-
|
154 |
-
def freeze(self):
|
155 |
-
self.transformer = self.transformer.eval()
|
156 |
-
#self.train = disabled_train
|
157 |
-
for param in self.parameters():
|
158 |
-
param.requires_grad = False
|
159 |
-
|
160 |
-
def forward(self, text):
|
161 |
-
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
|
162 |
-
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
|
163 |
-
tokens = batch_encoding["input_ids"].to(self.device)
|
164 |
-
outputs = self.transformer(input_ids=tokens)
|
165 |
-
|
166 |
-
z = outputs.last_hidden_state
|
167 |
-
return z
|
168 |
-
|
169 |
-
def encode(self, text):
|
170 |
-
return self(text)
|
171 |
-
|
172 |
-
|
173 |
-
class FrozenCLAPEmbedder(AbstractEncoder):
|
174 |
-
"""Uses the CLAP transformer encoder for text (from huggingface)"""
|
175 |
-
def __init__(self, weights_path, freeze=True, device="cuda", max_length=77): # clip-vit-base-patch32
|
176 |
-
super().__init__()
|
177 |
-
|
178 |
-
model_state_dict = torch.load(weights_path, map_location=torch.device('cpu'))['model']
|
179 |
-
match_params = dict()
|
180 |
-
for key in list(model_state_dict.keys()):
|
181 |
-
if 'caption_encoder' in key:
|
182 |
-
match_params[key.replace('caption_encoder.', '')] = model_state_dict[key]
|
183 |
-
|
184 |
-
config_as_str = files('ldm').joinpath('modules/encoders/CLAP/config.yml').read_text()
|
185 |
-
args = read_config_as_args(config_as_str, is_config_str=True)
|
186 |
-
|
187 |
-
# To device
|
188 |
-
self.tokenizer = AutoTokenizer.from_pretrained(args.text_model) # args.text_model
|
189 |
-
self.caption_encoder = TextEncoder(
|
190 |
-
args.d_proj, args.text_model, args.transformer_embed_dim
|
191 |
-
)
|
192 |
-
|
193 |
-
self.max_length = max_length
|
194 |
-
self.device = device
|
195 |
-
if freeze: self.freeze()
|
196 |
-
|
197 |
-
print(f"{self.caption_encoder.__class__.__name__} comes with {count_params(self.caption_encoder) * 1.e-6:.2f} M params.")
|
198 |
-
|
199 |
-
def freeze(self):
|
200 |
-
self.caption_encoder.base = self.caption_encoder.base.eval()
|
201 |
-
for param in self.caption_encoder.base.parameters():
|
202 |
-
param.requires_grad = False
|
203 |
-
|
204 |
-
|
205 |
-
def encode(self, text):
|
206 |
-
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
|
207 |
-
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
|
208 |
-
tokens = batch_encoding["input_ids"].to(self.device)
|
209 |
-
|
210 |
-
outputs = self.caption_encoder.base(input_ids=tokens)
|
211 |
-
z = self.caption_encoder.projection(outputs.last_hidden_state)
|
212 |
-
return z
|
213 |
-
|
214 |
-
class FrozenCLAPEmbedderNoLoad(AbstractEncoder):
|
215 |
-
def __init__(self, config, freeze=True, device="cpu", max_length=77):
|
216 |
-
super().__init__()
|
217 |
-
args = config
|
218 |
-
|
219 |
-
# To device
|
220 |
-
self.tokenizer = AutoTokenizer.from_pretrained(args.text_model) # args.text_model
|
221 |
-
self.caption_encoder = TextEncoder(
|
222 |
-
args.d_proj, args.text_model, args.transformer_embed_dim
|
223 |
-
)
|
224 |
-
|
225 |
-
self.max_length = max_length
|
226 |
-
self.device = device
|
227 |
-
if freeze: self.freeze()
|
228 |
-
|
229 |
-
print(f"{self.caption_encoder.__class__.__name__} comes with {count_params(self.caption_encoder) * 1.e-6:.2f} M params.")
|
230 |
-
|
231 |
-
def freeze(self):
|
232 |
-
self.caption_encoder.base = self.caption_encoder.base.eval()
|
233 |
-
for param in self.caption_encoder.base.parameters():
|
234 |
-
param.requires_grad = False
|
235 |
-
|
236 |
-
|
237 |
-
def encode(self, text):
|
238 |
-
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
|
239 |
-
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
|
240 |
-
tokens = batch_encoding["input_ids"].to(self.device)
|
241 |
-
|
242 |
-
outputs = self.caption_encoder.base(input_ids=tokens)
|
243 |
-
z = self.caption_encoder.projection(outputs.last_hidden_state)
|
244 |
-
return z
|
245 |
-
|
246 |
-
|
247 |
-
class NewFrozenCLAPEmbedder(AbstractEncoder):
|
248 |
-
"""Uses the CLAP transformer encoder for text (from huggingface)"""
|
249 |
-
def __init__(self, weights_path, freeze=True, device="cuda", max_length=77): # clip-vit-base-patch32
|
250 |
-
super().__init__()
|
251 |
-
# To device
|
252 |
-
from transformers import RobertaTokenizer
|
253 |
-
from ldm.modules.encoders.open_clap import create_model
|
254 |
-
|
255 |
-
|
256 |
-
model, model_cfg = create_model(
|
257 |
-
'HTSAT-tiny',
|
258 |
-
'roberta',
|
259 |
-
weights_path,
|
260 |
-
enable_fusion=True,
|
261 |
-
fusion_type='aff_2d'
|
262 |
-
)
|
263 |
-
|
264 |
-
del model.audio_branch, model.audio_transform, model.audio_projection
|
265 |
-
self.tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
|
266 |
-
self.model = model
|
267 |
-
|
268 |
-
self.max_length = max_length
|
269 |
-
self.device = device
|
270 |
-
if freeze: self.freeze()
|
271 |
-
|
272 |
-
param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
|
273 |
-
print(f'{self.model.__class__.__name__} comes with: {param_num / 1e+6:.3f} M params.')
|
274 |
-
|
275 |
-
def freeze(self):
|
276 |
-
self.model = self.model.eval()
|
277 |
-
for param in self.model.parameters():
|
278 |
-
param.requires_grad = False
|
279 |
-
|
280 |
-
def encode(self, text):
|
281 |
-
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
|
282 |
-
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
|
283 |
-
outputs = self.model.text_branch(input_ids=batch_encoding["input_ids"].to(self.device), attention_mask=batch_encoding["attention_mask"].to(self.device))
|
284 |
-
z = self.model.text_projection(outputs.last_hidden_state)
|
285 |
-
return z
|
286 |
-
|
287 |
-
class FrozenFLANEmbedder(AbstractEncoder):
|
288 |
-
"""Uses the T5 transformer encoder for text"""
|
289 |
-
def __init__(self, version="google/flan-t5-large", device="cuda", max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
|
290 |
-
super().__init__()
|
291 |
-
self.tokenizer = T5Tokenizer.from_pretrained(version)
|
292 |
-
self.transformer = T5EncoderModel.from_pretrained(version)
|
293 |
-
self.device = device
|
294 |
-
self.max_length = max_length # TODO: typical value?
|
295 |
-
if freeze:
|
296 |
-
self.freeze()
|
297 |
-
|
298 |
-
def freeze(self):
|
299 |
-
self.transformer = self.transformer.eval()
|
300 |
-
#self.train = disabled_train
|
301 |
-
for param in self.parameters():
|
302 |
-
param.requires_grad = False
|
303 |
-
|
304 |
-
def forward(self, text):
|
305 |
-
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
|
306 |
-
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
|
307 |
-
tokens = batch_encoding["input_ids"].to(self.device)
|
308 |
-
outputs = self.transformer(input_ids=tokens)
|
309 |
-
|
310 |
-
z = outputs.last_hidden_state
|
311 |
-
return z
|
312 |
-
|
313 |
-
def encode(self, text):
|
314 |
-
return self(text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_x-v61_syncbn_fast_8xb16-300e_coco.py
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
_base_ = './yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py'
|
2 |
-
deepen_factor = 1.33
|
3 |
-
widen_factor = 1.25
|
4 |
-
|
5 |
-
model = dict(
|
6 |
-
backbone=dict(
|
7 |
-
deepen_factor=deepen_factor,
|
8 |
-
widen_factor=widen_factor,
|
9 |
-
),
|
10 |
-
neck=dict(
|
11 |
-
deepen_factor=deepen_factor,
|
12 |
-
widen_factor=widen_factor,
|
13 |
-
),
|
14 |
-
bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abubakari/Sepsis-fastapi-prediction-app/README.md
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Sepsis Fastapi Prediction App
|
3 |
-
emoji: 👁
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: gray
|
6 |
-
sdk: docker
|
7 |
-
pinned: false
|
8 |
-
---
|
9 |
-
|
10 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ActivatedOne/JorisCos-ConvTasNet_Libri1Mix_enhsingle_16k/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/JorisCos/ConvTasNet_Libri1Mix_enhsingle_16k").launch()
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Factory.d.ts
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
import Spinner from './Spinner';
|
2 |
-
import Base from '../base/Base';
|
3 |
-
|
4 |
-
export default function Factory(
|
5 |
-
config?: Base.IConfig
|
6 |
-
): Spinner;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ama434/neutral-barlow/README.md
DELETED
@@ -1,17 +0,0 @@
|
|
1 |
-
|
2 |
-
---
|
3 |
-
tags: [gradio-theme]
|
4 |
-
title: neutral-barlow
|
5 |
-
colorFrom: orange
|
6 |
-
colorTo: purple
|
7 |
-
sdk: gradio
|
8 |
-
sdk_version: 3.34.0
|
9 |
-
app_file: app.py
|
10 |
-
pinned: false
|
11 |
-
license: apache-2.0
|
12 |
-
---
|
13 |
-
# neutral-barlow
|
14 |
-
## Description
|
15 |
-
Add a description of this theme here!
|
16 |
-
## Contributions
|
17 |
-
Thanks to [@Ama434](https://huggingface.co/Ama434) for adding this gradio theme!
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/yt-shorts-video-captioning/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Yt Shorts Video Captioning
|
3 |
-
emoji: 🎥
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: gray
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.3.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/app1.py
DELETED
@@ -1,93 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import time
|
3 |
-
import os
|
4 |
-
from huggingface_hub import HfApi, create_repo
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
def convert_checkpoint(url, name,repo_id, hf_token ,image_size, scheduler_type, use_half):
|
10 |
-
try:
|
11 |
-
|
12 |
-
print("Downloading")
|
13 |
-
# Download the file
|
14 |
-
os.system(f"wget -q {url} --content-disposition -O {name}.safetensors")
|
15 |
-
|
16 |
-
time.sleep(5)
|
17 |
-
print("Download successful")
|
18 |
-
|
19 |
-
# Construct the checkpoint path and dump path
|
20 |
-
checkpoint_path = f"{name}.safetensors"
|
21 |
-
dump_path = f"/home/user/app/{name}"
|
22 |
-
|
23 |
-
cmd = [
|
24 |
-
"python3",
|
25 |
-
"diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py", # Replace with the name of your script
|
26 |
-
"--checkpoint_path", checkpoint_path,
|
27 |
-
f"--scheduler_type {scheduler_type}",
|
28 |
-
f"--image_size {image_size}",
|
29 |
-
"--prediction_type epsilon",
|
30 |
-
"--device cpu",
|
31 |
-
"--from_safetensors",
|
32 |
-
"--to_safetensors",
|
33 |
-
"--dump_path", dump_path
|
34 |
-
]
|
35 |
-
|
36 |
-
if use_half:
|
37 |
-
cmd.append("--half")
|
38 |
-
|
39 |
-
result = os.system(" ".join(cmd))
|
40 |
-
output = result
|
41 |
-
|
42 |
-
|
43 |
-
os.remove(checkpoint_path)
|
44 |
-
|
45 |
-
# Log in to your Hugging Face account
|
46 |
-
os.system(f"huggingface-cli login --token {hf_token}")
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
# Create a repository
|
51 |
-
api = HfApi()
|
52 |
-
api.create_repo(f"{repo_id}/{name}")
|
53 |
-
|
54 |
-
# Upload a folder to the repository
|
55 |
-
api.upload_folder(
|
56 |
-
folder_path=dump_path,
|
57 |
-
repo_id=f"{repo_id}/{name}",
|
58 |
-
repo_type="model",
|
59 |
-
)
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
except Exception as e:
|
65 |
-
output = str(e)
|
66 |
-
|
67 |
-
return output
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
iface = gr.Interface(
|
73 |
-
fn=convert_checkpoint,
|
74 |
-
inputs=[
|
75 |
-
gr.inputs.Textbox(label="URL"),
|
76 |
-
gr.inputs.Textbox(label="Name"),
|
77 |
-
gr.inputs.Textbox(label="Repo id"),
|
78 |
-
# gr.inputs.Dropdown(label="Visibility", choices=["True","False"]),
|
79 |
-
gr.inputs.Textbox(label="Hugging Face API Token"),
|
80 |
-
gr.inputs.Radio(label="Image Size", choices=["512", "768"]),
|
81 |
-
gr.inputs.Dropdown(label="Scheduler Type", choices=['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']),
|
82 |
-
gr.inputs.Checkbox(label="Use Half Precision")
|
83 |
-
],
|
84 |
-
outputs=gr.outputs.Textbox(),
|
85 |
-
title="**Forked from https://huggingface.co/spaces/Androidonnxfork/CivitAi-to-Diffusers**",
|
86 |
-
max_queue_size=5
|
87 |
-
)
|
88 |
-
iface.launch()
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/inference/README.md
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
# Inference Examples
|
2 |
-
|
3 |
-
**The inference examples folder is deprecated and will be removed in a future version**.
|
4 |
-
**Officially supported inference examples can be found in the [Pipelines folder](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines)**.
|
5 |
-
|
6 |
-
- For `Image-to-Image text-guided generation with Stable Diffusion`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples)
|
7 |
-
- For `In-painting using Stable Diffusion`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples)
|
8 |
-
- For `Tweak prompts reusing seeds and latents`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/encnet_r50-d8.py',
|
3 |
-
'../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
|
4 |
-
'../_base_/schedules/schedule_20k.py'
|
5 |
-
]
|
6 |
-
model = dict(
|
7 |
-
decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_40k_voc12aug.py
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/encnet_r50-d8.py',
|
3 |
-
'../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
|
4 |
-
'../_base_/schedules/schedule_40k.py'
|
5 |
-
]
|
6 |
-
model = dict(
|
7 |
-
decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_cell_widths.py
DELETED
@@ -1,451 +0,0 @@
|
|
1 |
-
# Auto generated by make_terminal_widths.py
|
2 |
-
|
3 |
-
CELL_WIDTHS = [
|
4 |
-
(0, 0, 0),
|
5 |
-
(1, 31, -1),
|
6 |
-
(127, 159, -1),
|
7 |
-
(768, 879, 0),
|
8 |
-
(1155, 1161, 0),
|
9 |
-
(1425, 1469, 0),
|
10 |
-
(1471, 1471, 0),
|
11 |
-
(1473, 1474, 0),
|
12 |
-
(1476, 1477, 0),
|
13 |
-
(1479, 1479, 0),
|
14 |
-
(1552, 1562, 0),
|
15 |
-
(1611, 1631, 0),
|
16 |
-
(1648, 1648, 0),
|
17 |
-
(1750, 1756, 0),
|
18 |
-
(1759, 1764, 0),
|
19 |
-
(1767, 1768, 0),
|
20 |
-
(1770, 1773, 0),
|
21 |
-
(1809, 1809, 0),
|
22 |
-
(1840, 1866, 0),
|
23 |
-
(1958, 1968, 0),
|
24 |
-
(2027, 2035, 0),
|
25 |
-
(2045, 2045, 0),
|
26 |
-
(2070, 2073, 0),
|
27 |
-
(2075, 2083, 0),
|
28 |
-
(2085, 2087, 0),
|
29 |
-
(2089, 2093, 0),
|
30 |
-
(2137, 2139, 0),
|
31 |
-
(2259, 2273, 0),
|
32 |
-
(2275, 2306, 0),
|
33 |
-
(2362, 2362, 0),
|
34 |
-
(2364, 2364, 0),
|
35 |
-
(2369, 2376, 0),
|
36 |
-
(2381, 2381, 0),
|
37 |
-
(2385, 2391, 0),
|
38 |
-
(2402, 2403, 0),
|
39 |
-
(2433, 2433, 0),
|
40 |
-
(2492, 2492, 0),
|
41 |
-
(2497, 2500, 0),
|
42 |
-
(2509, 2509, 0),
|
43 |
-
(2530, 2531, 0),
|
44 |
-
(2558, 2558, 0),
|
45 |
-
(2561, 2562, 0),
|
46 |
-
(2620, 2620, 0),
|
47 |
-
(2625, 2626, 0),
|
48 |
-
(2631, 2632, 0),
|
49 |
-
(2635, 2637, 0),
|
50 |
-
(2641, 2641, 0),
|
51 |
-
(2672, 2673, 0),
|
52 |
-
(2677, 2677, 0),
|
53 |
-
(2689, 2690, 0),
|
54 |
-
(2748, 2748, 0),
|
55 |
-
(2753, 2757, 0),
|
56 |
-
(2759, 2760, 0),
|
57 |
-
(2765, 2765, 0),
|
58 |
-
(2786, 2787, 0),
|
59 |
-
(2810, 2815, 0),
|
60 |
-
(2817, 2817, 0),
|
61 |
-
(2876, 2876, 0),
|
62 |
-
(2879, 2879, 0),
|
63 |
-
(2881, 2884, 0),
|
64 |
-
(2893, 2893, 0),
|
65 |
-
(2901, 2902, 0),
|
66 |
-
(2914, 2915, 0),
|
67 |
-
(2946, 2946, 0),
|
68 |
-
(3008, 3008, 0),
|
69 |
-
(3021, 3021, 0),
|
70 |
-
(3072, 3072, 0),
|
71 |
-
(3076, 3076, 0),
|
72 |
-
(3134, 3136, 0),
|
73 |
-
(3142, 3144, 0),
|
74 |
-
(3146, 3149, 0),
|
75 |
-
(3157, 3158, 0),
|
76 |
-
(3170, 3171, 0),
|
77 |
-
(3201, 3201, 0),
|
78 |
-
(3260, 3260, 0),
|
79 |
-
(3263, 3263, 0),
|
80 |
-
(3270, 3270, 0),
|
81 |
-
(3276, 3277, 0),
|
82 |
-
(3298, 3299, 0),
|
83 |
-
(3328, 3329, 0),
|
84 |
-
(3387, 3388, 0),
|
85 |
-
(3393, 3396, 0),
|
86 |
-
(3405, 3405, 0),
|
87 |
-
(3426, 3427, 0),
|
88 |
-
(3457, 3457, 0),
|
89 |
-
(3530, 3530, 0),
|
90 |
-
(3538, 3540, 0),
|
91 |
-
(3542, 3542, 0),
|
92 |
-
(3633, 3633, 0),
|
93 |
-
(3636, 3642, 0),
|
94 |
-
(3655, 3662, 0),
|
95 |
-
(3761, 3761, 0),
|
96 |
-
(3764, 3772, 0),
|
97 |
-
(3784, 3789, 0),
|
98 |
-
(3864, 3865, 0),
|
99 |
-
(3893, 3893, 0),
|
100 |
-
(3895, 3895, 0),
|
101 |
-
(3897, 3897, 0),
|
102 |
-
(3953, 3966, 0),
|
103 |
-
(3968, 3972, 0),
|
104 |
-
(3974, 3975, 0),
|
105 |
-
(3981, 3991, 0),
|
106 |
-
(3993, 4028, 0),
|
107 |
-
(4038, 4038, 0),
|
108 |
-
(4141, 4144, 0),
|
109 |
-
(4146, 4151, 0),
|
110 |
-
(4153, 4154, 0),
|
111 |
-
(4157, 4158, 0),
|
112 |
-
(4184, 4185, 0),
|
113 |
-
(4190, 4192, 0),
|
114 |
-
(4209, 4212, 0),
|
115 |
-
(4226, 4226, 0),
|
116 |
-
(4229, 4230, 0),
|
117 |
-
(4237, 4237, 0),
|
118 |
-
(4253, 4253, 0),
|
119 |
-
(4352, 4447, 2),
|
120 |
-
(4957, 4959, 0),
|
121 |
-
(5906, 5908, 0),
|
122 |
-
(5938, 5940, 0),
|
123 |
-
(5970, 5971, 0),
|
124 |
-
(6002, 6003, 0),
|
125 |
-
(6068, 6069, 0),
|
126 |
-
(6071, 6077, 0),
|
127 |
-
(6086, 6086, 0),
|
128 |
-
(6089, 6099, 0),
|
129 |
-
(6109, 6109, 0),
|
130 |
-
(6155, 6157, 0),
|
131 |
-
(6277, 6278, 0),
|
132 |
-
(6313, 6313, 0),
|
133 |
-
(6432, 6434, 0),
|
134 |
-
(6439, 6440, 0),
|
135 |
-
(6450, 6450, 0),
|
136 |
-
(6457, 6459, 0),
|
137 |
-
(6679, 6680, 0),
|
138 |
-
(6683, 6683, 0),
|
139 |
-
(6742, 6742, 0),
|
140 |
-
(6744, 6750, 0),
|
141 |
-
(6752, 6752, 0),
|
142 |
-
(6754, 6754, 0),
|
143 |
-
(6757, 6764, 0),
|
144 |
-
(6771, 6780, 0),
|
145 |
-
(6783, 6783, 0),
|
146 |
-
(6832, 6848, 0),
|
147 |
-
(6912, 6915, 0),
|
148 |
-
(6964, 6964, 0),
|
149 |
-
(6966, 6970, 0),
|
150 |
-
(6972, 6972, 0),
|
151 |
-
(6978, 6978, 0),
|
152 |
-
(7019, 7027, 0),
|
153 |
-
(7040, 7041, 0),
|
154 |
-
(7074, 7077, 0),
|
155 |
-
(7080, 7081, 0),
|
156 |
-
(7083, 7085, 0),
|
157 |
-
(7142, 7142, 0),
|
158 |
-
(7144, 7145, 0),
|
159 |
-
(7149, 7149, 0),
|
160 |
-
(7151, 7153, 0),
|
161 |
-
(7212, 7219, 0),
|
162 |
-
(7222, 7223, 0),
|
163 |
-
(7376, 7378, 0),
|
164 |
-
(7380, 7392, 0),
|
165 |
-
(7394, 7400, 0),
|
166 |
-
(7405, 7405, 0),
|
167 |
-
(7412, 7412, 0),
|
168 |
-
(7416, 7417, 0),
|
169 |
-
(7616, 7673, 0),
|
170 |
-
(7675, 7679, 0),
|
171 |
-
(8203, 8207, 0),
|
172 |
-
(8232, 8238, 0),
|
173 |
-
(8288, 8291, 0),
|
174 |
-
(8400, 8432, 0),
|
175 |
-
(8986, 8987, 2),
|
176 |
-
(9001, 9002, 2),
|
177 |
-
(9193, 9196, 2),
|
178 |
-
(9200, 9200, 2),
|
179 |
-
(9203, 9203, 2),
|
180 |
-
(9725, 9726, 2),
|
181 |
-
(9748, 9749, 2),
|
182 |
-
(9800, 9811, 2),
|
183 |
-
(9855, 9855, 2),
|
184 |
-
(9875, 9875, 2),
|
185 |
-
(9889, 9889, 2),
|
186 |
-
(9898, 9899, 2),
|
187 |
-
(9917, 9918, 2),
|
188 |
-
(9924, 9925, 2),
|
189 |
-
(9934, 9934, 2),
|
190 |
-
(9940, 9940, 2),
|
191 |
-
(9962, 9962, 2),
|
192 |
-
(9970, 9971, 2),
|
193 |
-
(9973, 9973, 2),
|
194 |
-
(9978, 9978, 2),
|
195 |
-
(9981, 9981, 2),
|
196 |
-
(9989, 9989, 2),
|
197 |
-
(9994, 9995, 2),
|
198 |
-
(10024, 10024, 2),
|
199 |
-
(10060, 10060, 2),
|
200 |
-
(10062, 10062, 2),
|
201 |
-
(10067, 10069, 2),
|
202 |
-
(10071, 10071, 2),
|
203 |
-
(10133, 10135, 2),
|
204 |
-
(10160, 10160, 2),
|
205 |
-
(10175, 10175, 2),
|
206 |
-
(11035, 11036, 2),
|
207 |
-
(11088, 11088, 2),
|
208 |
-
(11093, 11093, 2),
|
209 |
-
(11503, 11505, 0),
|
210 |
-
(11647, 11647, 0),
|
211 |
-
(11744, 11775, 0),
|
212 |
-
(11904, 11929, 2),
|
213 |
-
(11931, 12019, 2),
|
214 |
-
(12032, 12245, 2),
|
215 |
-
(12272, 12283, 2),
|
216 |
-
(12288, 12329, 2),
|
217 |
-
(12330, 12333, 0),
|
218 |
-
(12334, 12350, 2),
|
219 |
-
(12353, 12438, 2),
|
220 |
-
(12441, 12442, 0),
|
221 |
-
(12443, 12543, 2),
|
222 |
-
(12549, 12591, 2),
|
223 |
-
(12593, 12686, 2),
|
224 |
-
(12688, 12771, 2),
|
225 |
-
(12784, 12830, 2),
|
226 |
-
(12832, 12871, 2),
|
227 |
-
(12880, 19903, 2),
|
228 |
-
(19968, 42124, 2),
|
229 |
-
(42128, 42182, 2),
|
230 |
-
(42607, 42610, 0),
|
231 |
-
(42612, 42621, 0),
|
232 |
-
(42654, 42655, 0),
|
233 |
-
(42736, 42737, 0),
|
234 |
-
(43010, 43010, 0),
|
235 |
-
(43014, 43014, 0),
|
236 |
-
(43019, 43019, 0),
|
237 |
-
(43045, 43046, 0),
|
238 |
-
(43052, 43052, 0),
|
239 |
-
(43204, 43205, 0),
|
240 |
-
(43232, 43249, 0),
|
241 |
-
(43263, 43263, 0),
|
242 |
-
(43302, 43309, 0),
|
243 |
-
(43335, 43345, 0),
|
244 |
-
(43360, 43388, 2),
|
245 |
-
(43392, 43394, 0),
|
246 |
-
(43443, 43443, 0),
|
247 |
-
(43446, 43449, 0),
|
248 |
-
(43452, 43453, 0),
|
249 |
-
(43493, 43493, 0),
|
250 |
-
(43561, 43566, 0),
|
251 |
-
(43569, 43570, 0),
|
252 |
-
(43573, 43574, 0),
|
253 |
-
(43587, 43587, 0),
|
254 |
-
(43596, 43596, 0),
|
255 |
-
(43644, 43644, 0),
|
256 |
-
(43696, 43696, 0),
|
257 |
-
(43698, 43700, 0),
|
258 |
-
(43703, 43704, 0),
|
259 |
-
(43710, 43711, 0),
|
260 |
-
(43713, 43713, 0),
|
261 |
-
(43756, 43757, 0),
|
262 |
-
(43766, 43766, 0),
|
263 |
-
(44005, 44005, 0),
|
264 |
-
(44008, 44008, 0),
|
265 |
-
(44013, 44013, 0),
|
266 |
-
(44032, 55203, 2),
|
267 |
-
(63744, 64255, 2),
|
268 |
-
(64286, 64286, 0),
|
269 |
-
(65024, 65039, 0),
|
270 |
-
(65040, 65049, 2),
|
271 |
-
(65056, 65071, 0),
|
272 |
-
(65072, 65106, 2),
|
273 |
-
(65108, 65126, 2),
|
274 |
-
(65128, 65131, 2),
|
275 |
-
(65281, 65376, 2),
|
276 |
-
(65504, 65510, 2),
|
277 |
-
(66045, 66045, 0),
|
278 |
-
(66272, 66272, 0),
|
279 |
-
(66422, 66426, 0),
|
280 |
-
(68097, 68099, 0),
|
281 |
-
(68101, 68102, 0),
|
282 |
-
(68108, 68111, 0),
|
283 |
-
(68152, 68154, 0),
|
284 |
-
(68159, 68159, 0),
|
285 |
-
(68325, 68326, 0),
|
286 |
-
(68900, 68903, 0),
|
287 |
-
(69291, 69292, 0),
|
288 |
-
(69446, 69456, 0),
|
289 |
-
(69633, 69633, 0),
|
290 |
-
(69688, 69702, 0),
|
291 |
-
(69759, 69761, 0),
|
292 |
-
(69811, 69814, 0),
|
293 |
-
(69817, 69818, 0),
|
294 |
-
(69888, 69890, 0),
|
295 |
-
(69927, 69931, 0),
|
296 |
-
(69933, 69940, 0),
|
297 |
-
(70003, 70003, 0),
|
298 |
-
(70016, 70017, 0),
|
299 |
-
(70070, 70078, 0),
|
300 |
-
(70089, 70092, 0),
|
301 |
-
(70095, 70095, 0),
|
302 |
-
(70191, 70193, 0),
|
303 |
-
(70196, 70196, 0),
|
304 |
-
(70198, 70199, 0),
|
305 |
-
(70206, 70206, 0),
|
306 |
-
(70367, 70367, 0),
|
307 |
-
(70371, 70378, 0),
|
308 |
-
(70400, 70401, 0),
|
309 |
-
(70459, 70460, 0),
|
310 |
-
(70464, 70464, 0),
|
311 |
-
(70502, 70508, 0),
|
312 |
-
(70512, 70516, 0),
|
313 |
-
(70712, 70719, 0),
|
314 |
-
(70722, 70724, 0),
|
315 |
-
(70726, 70726, 0),
|
316 |
-
(70750, 70750, 0),
|
317 |
-
(70835, 70840, 0),
|
318 |
-
(70842, 70842, 0),
|
319 |
-
(70847, 70848, 0),
|
320 |
-
(70850, 70851, 0),
|
321 |
-
(71090, 71093, 0),
|
322 |
-
(71100, 71101, 0),
|
323 |
-
(71103, 71104, 0),
|
324 |
-
(71132, 71133, 0),
|
325 |
-
(71219, 71226, 0),
|
326 |
-
(71229, 71229, 0),
|
327 |
-
(71231, 71232, 0),
|
328 |
-
(71339, 71339, 0),
|
329 |
-
(71341, 71341, 0),
|
330 |
-
(71344, 71349, 0),
|
331 |
-
(71351, 71351, 0),
|
332 |
-
(71453, 71455, 0),
|
333 |
-
(71458, 71461, 0),
|
334 |
-
(71463, 71467, 0),
|
335 |
-
(71727, 71735, 0),
|
336 |
-
(71737, 71738, 0),
|
337 |
-
(71995, 71996, 0),
|
338 |
-
(71998, 71998, 0),
|
339 |
-
(72003, 72003, 0),
|
340 |
-
(72148, 72151, 0),
|
341 |
-
(72154, 72155, 0),
|
342 |
-
(72160, 72160, 0),
|
343 |
-
(72193, 72202, 0),
|
344 |
-
(72243, 72248, 0),
|
345 |
-
(72251, 72254, 0),
|
346 |
-
(72263, 72263, 0),
|
347 |
-
(72273, 72278, 0),
|
348 |
-
(72281, 72283, 0),
|
349 |
-
(72330, 72342, 0),
|
350 |
-
(72344, 72345, 0),
|
351 |
-
(72752, 72758, 0),
|
352 |
-
(72760, 72765, 0),
|
353 |
-
(72767, 72767, 0),
|
354 |
-
(72850, 72871, 0),
|
355 |
-
(72874, 72880, 0),
|
356 |
-
(72882, 72883, 0),
|
357 |
-
(72885, 72886, 0),
|
358 |
-
(73009, 73014, 0),
|
359 |
-
(73018, 73018, 0),
|
360 |
-
(73020, 73021, 0),
|
361 |
-
(73023, 73029, 0),
|
362 |
-
(73031, 73031, 0),
|
363 |
-
(73104, 73105, 0),
|
364 |
-
(73109, 73109, 0),
|
365 |
-
(73111, 73111, 0),
|
366 |
-
(73459, 73460, 0),
|
367 |
-
(92912, 92916, 0),
|
368 |
-
(92976, 92982, 0),
|
369 |
-
(94031, 94031, 0),
|
370 |
-
(94095, 94098, 0),
|
371 |
-
(94176, 94179, 2),
|
372 |
-
(94180, 94180, 0),
|
373 |
-
(94192, 94193, 2),
|
374 |
-
(94208, 100343, 2),
|
375 |
-
(100352, 101589, 2),
|
376 |
-
(101632, 101640, 2),
|
377 |
-
(110592, 110878, 2),
|
378 |
-
(110928, 110930, 2),
|
379 |
-
(110948, 110951, 2),
|
380 |
-
(110960, 111355, 2),
|
381 |
-
(113821, 113822, 0),
|
382 |
-
(119143, 119145, 0),
|
383 |
-
(119163, 119170, 0),
|
384 |
-
(119173, 119179, 0),
|
385 |
-
(119210, 119213, 0),
|
386 |
-
(119362, 119364, 0),
|
387 |
-
(121344, 121398, 0),
|
388 |
-
(121403, 121452, 0),
|
389 |
-
(121461, 121461, 0),
|
390 |
-
(121476, 121476, 0),
|
391 |
-
(121499, 121503, 0),
|
392 |
-
(121505, 121519, 0),
|
393 |
-
(122880, 122886, 0),
|
394 |
-
(122888, 122904, 0),
|
395 |
-
(122907, 122913, 0),
|
396 |
-
(122915, 122916, 0),
|
397 |
-
(122918, 122922, 0),
|
398 |
-
(123184, 123190, 0),
|
399 |
-
(123628, 123631, 0),
|
400 |
-
(125136, 125142, 0),
|
401 |
-
(125252, 125258, 0),
|
402 |
-
(126980, 126980, 2),
|
403 |
-
(127183, 127183, 2),
|
404 |
-
(127374, 127374, 2),
|
405 |
-
(127377, 127386, 2),
|
406 |
-
(127488, 127490, 2),
|
407 |
-
(127504, 127547, 2),
|
408 |
-
(127552, 127560, 2),
|
409 |
-
(127568, 127569, 2),
|
410 |
-
(127584, 127589, 2),
|
411 |
-
(127744, 127776, 2),
|
412 |
-
(127789, 127797, 2),
|
413 |
-
(127799, 127868, 2),
|
414 |
-
(127870, 127891, 2),
|
415 |
-
(127904, 127946, 2),
|
416 |
-
(127951, 127955, 2),
|
417 |
-
(127968, 127984, 2),
|
418 |
-
(127988, 127988, 2),
|
419 |
-
(127992, 128062, 2),
|
420 |
-
(128064, 128064, 2),
|
421 |
-
(128066, 128252, 2),
|
422 |
-
(128255, 128317, 2),
|
423 |
-
(128331, 128334, 2),
|
424 |
-
(128336, 128359, 2),
|
425 |
-
(128378, 128378, 2),
|
426 |
-
(128405, 128406, 2),
|
427 |
-
(128420, 128420, 2),
|
428 |
-
(128507, 128591, 2),
|
429 |
-
(128640, 128709, 2),
|
430 |
-
(128716, 128716, 2),
|
431 |
-
(128720, 128722, 2),
|
432 |
-
(128725, 128727, 2),
|
433 |
-
(128747, 128748, 2),
|
434 |
-
(128756, 128764, 2),
|
435 |
-
(128992, 129003, 2),
|
436 |
-
(129292, 129338, 2),
|
437 |
-
(129340, 129349, 2),
|
438 |
-
(129351, 129400, 2),
|
439 |
-
(129402, 129483, 2),
|
440 |
-
(129485, 129535, 2),
|
441 |
-
(129648, 129652, 2),
|
442 |
-
(129656, 129658, 2),
|
443 |
-
(129664, 129670, 2),
|
444 |
-
(129680, 129704, 2),
|
445 |
-
(129712, 129718, 2),
|
446 |
-
(129728, 129730, 2),
|
447 |
-
(129744, 129750, 2),
|
448 |
-
(131072, 196605, 2),
|
449 |
-
(196608, 262141, 2),
|
450 |
-
(917760, 917999, 0),
|
451 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets.py
DELETED
@@ -1,123 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
|
5 |
-
import layers
|
6 |
-
from . import spec_utils
|
7 |
-
|
8 |
-
|
9 |
-
class BaseASPPNet(nn.Module):
|
10 |
-
def __init__(self, nin, ch, dilations=(4, 8, 16)):
|
11 |
-
super(BaseASPPNet, self).__init__()
|
12 |
-
self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
|
13 |
-
self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
|
14 |
-
self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
|
15 |
-
self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
|
16 |
-
|
17 |
-
self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
|
18 |
-
|
19 |
-
self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
|
20 |
-
self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
|
21 |
-
self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
|
22 |
-
self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
|
23 |
-
|
24 |
-
def __call__(self, x):
|
25 |
-
h, e1 = self.enc1(x)
|
26 |
-
h, e2 = self.enc2(h)
|
27 |
-
h, e3 = self.enc3(h)
|
28 |
-
h, e4 = self.enc4(h)
|
29 |
-
|
30 |
-
h = self.aspp(h)
|
31 |
-
|
32 |
-
h = self.dec4(h, e4)
|
33 |
-
h = self.dec3(h, e3)
|
34 |
-
h = self.dec2(h, e2)
|
35 |
-
h = self.dec1(h, e1)
|
36 |
-
|
37 |
-
return h
|
38 |
-
|
39 |
-
|
40 |
-
class CascadedASPPNet(nn.Module):
|
41 |
-
def __init__(self, n_fft):
|
42 |
-
super(CascadedASPPNet, self).__init__()
|
43 |
-
self.stg1_low_band_net = BaseASPPNet(2, 16)
|
44 |
-
self.stg1_high_band_net = BaseASPPNet(2, 16)
|
45 |
-
|
46 |
-
self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0)
|
47 |
-
self.stg2_full_band_net = BaseASPPNet(8, 16)
|
48 |
-
|
49 |
-
self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
|
50 |
-
self.stg3_full_band_net = BaseASPPNet(16, 32)
|
51 |
-
|
52 |
-
self.out = nn.Conv2d(32, 2, 1, bias=False)
|
53 |
-
self.aux1_out = nn.Conv2d(16, 2, 1, bias=False)
|
54 |
-
self.aux2_out = nn.Conv2d(16, 2, 1, bias=False)
|
55 |
-
|
56 |
-
self.max_bin = n_fft // 2
|
57 |
-
self.output_bin = n_fft // 2 + 1
|
58 |
-
|
59 |
-
self.offset = 128
|
60 |
-
|
61 |
-
def forward(self, x, aggressiveness=None):
|
62 |
-
mix = x.detach()
|
63 |
-
x = x.clone()
|
64 |
-
|
65 |
-
x = x[:, :, : self.max_bin]
|
66 |
-
|
67 |
-
bandw = x.size()[2] // 2
|
68 |
-
aux1 = torch.cat(
|
69 |
-
[
|
70 |
-
self.stg1_low_band_net(x[:, :, :bandw]),
|
71 |
-
self.stg1_high_band_net(x[:, :, bandw:]),
|
72 |
-
],
|
73 |
-
dim=2,
|
74 |
-
)
|
75 |
-
|
76 |
-
h = torch.cat([x, aux1], dim=1)
|
77 |
-
aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
|
78 |
-
|
79 |
-
h = torch.cat([x, aux1, aux2], dim=1)
|
80 |
-
h = self.stg3_full_band_net(self.stg3_bridge(h))
|
81 |
-
|
82 |
-
mask = torch.sigmoid(self.out(h))
|
83 |
-
mask = F.pad(
|
84 |
-
input=mask,
|
85 |
-
pad=(0, 0, 0, self.output_bin - mask.size()[2]),
|
86 |
-
mode="replicate",
|
87 |
-
)
|
88 |
-
|
89 |
-
if self.training:
|
90 |
-
aux1 = torch.sigmoid(self.aux1_out(aux1))
|
91 |
-
aux1 = F.pad(
|
92 |
-
input=aux1,
|
93 |
-
pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
|
94 |
-
mode="replicate",
|
95 |
-
)
|
96 |
-
aux2 = torch.sigmoid(self.aux2_out(aux2))
|
97 |
-
aux2 = F.pad(
|
98 |
-
input=aux2,
|
99 |
-
pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
|
100 |
-
mode="replicate",
|
101 |
-
)
|
102 |
-
return mask * mix, aux1 * mix, aux2 * mix
|
103 |
-
else:
|
104 |
-
if aggressiveness:
|
105 |
-
mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
|
106 |
-
mask[:, :, : aggressiveness["split_bin"]],
|
107 |
-
1 + aggressiveness["value"] / 3,
|
108 |
-
)
|
109 |
-
mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
|
110 |
-
mask[:, :, aggressiveness["split_bin"] :],
|
111 |
-
1 + aggressiveness["value"],
|
112 |
-
)
|
113 |
-
|
114 |
-
return mask * mix
|
115 |
-
|
116 |
-
def predict(self, x_mag, aggressiveness=None):
|
117 |
-
h = self.forward(x_mag, aggressiveness)
|
118 |
-
|
119 |
-
if self.offset > 0:
|
120 |
-
h = h[:, :, :, self.offset : -self.offset]
|
121 |
-
assert h.size()[3] > 0
|
122 |
-
|
123 |
-
return h
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Ataque A Titan 2 Ps Vita Parche Ingls.md
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Ataque a Titan 2 PS Vita Parche en Inglés Descargar: Cómo jugar el juego en Inglés</h1>
|
3 |
-
<p>Si eres fan de la popular serie de anime y manga Attack on Titan, quizás te interese jugar a Attack on Titan 2, un videojuego basado en la segunda temporada del programa. Sin embargo, si tienes un PS Vita, es posible que tengas un problema: el juego solo está disponible en japonés. Afortunadamente, hay una manera de jugar el juego en inglés, gracias a un parche de traducción hecho por fans. En este artículo, te mostraremos cómo descargar e instalar el parche en inglés para Attack on Titan 2 en tu PS Vita y cómo disfrutar del juego en tu idioma preferido. </p>
|
4 |
-
<h2>ataque a titan 2 ps vita parche inglés</h2><br /><p><b><b>DOWNLOAD</b> ---> <a href="https://bltlly.com/2v6LAk">https://bltlly.com/2v6LAk</a></b></p><br /><br />
|
5 |
-
<h2>Introducción</h2>
|
6 |
-
<h3>¿Qué es el ataque a Titan 2?</h3>
|
7 |
-
<p>Attack on Titan 2 es un juego de acción y aventura desarrollado por Omega Force y publicado por Koei Tecmo en 2018. Es una secuela de Attack on Titan: Wings of Freedom, que fue lanzado en 2016. El juego sigue la historia de la segunda temporada del anime, donde la humanidad está bajo ataque por criaturas humanoides gigantes llamadas Titanes. El juego te permite crear tu propio personaje y unirte a Survey Corps, un grupo militar que lucha contra los Titanes usando un equipo especial llamado Omni-Directional Mobility Gear (ODM). También puedes interactuar con varios personajes del anime, como Eren, Mikasa, Armin, Levi y más. </p>
|
8 |
-
<h3>¿Por qué necesitas un parche en inglés? </h3>
|
9 |
-
|
10 |
-
<h2>Cómo descargar e instalar el parche en inglés</h2>
|
11 |
-
<h3>Requisitos</h3>
|
12 |
-
<p>Antes de descargar e instalar el parche en inglés para Attack on Titan 2 en PS Vita, necesitarás algunas cosas:</p>
|
13 |
-
<ul>
|
14 |
-
<li>Un PS Vita con firmware personalizado (CFW) instalado. Puede usar cualquier CFW que soporte el plugin rePatch, como h-encore o Enso.</li>
|
15 |
-
<li>Una tarjeta microSD o una tarjeta de memoria PS Vita con espacio suficiente para almacenar el juego y los archivos de parches. </li>
|
16 |
-
<li>Un cable USB o un cliente FTP para transferir archivos desde tu PC a tu PS Vita.</li>
|
17 |
-
<li>Un PC con conexión a Internet para descargar el juego y los archivos de parches. </li>
|
18 |
-
</ul>
|
19 |
-
<h3>Pasos</h3>
|
20 |
-
<h4>Descargar el juego y la actualización</h4>
|
21 |
-
<p>El primer paso es descargar Attack on Titan 2 (PCSG01102) y su versión de actualización 1.05 de cualquier fuente confiable. Puedes usar cualquier sitio web que ofrezca juegos de PS Vita en formato NoNpDrm, como NPS Browser o PKGj. Asegúrate de que ambos archivos estén en formato . pkg. </p>
|
22 |
-
<h4>Descargar el plugin rePatch</h4>
|
23 |
-
<p>El siguiente paso es descargar el plugin rePatch desde [aquí]( 1 ). Este plugin te permite aplicar parches a los juegos de PS Vita sin modificar los archivos originales. Necesitará este plugin para usar el parche en inglés <h4>Descargar el parche en inglés</h4>
|
24 |
-
<p>El tercer paso es descargar el parche en inglés para Attack on Titan 2 desde [aquí]. Este parche está hecho por fans que tradujeron la mayor parte del texto y los menús del juego al inglés. El parche está en . formato zip, por lo que tendrá que extraerlo usando un programa como WinRAR o 7-Zip. </p>
|
25 |
-
<p></p>
|
26 |
-
<h4>Copia los archivos en tu PS Vita</h4>
|
27 |
-
<p>El paso final es copiar los archivos a tu PS Vita usando un cable USB o un cliente FTP. Estos son los pasos:</p>
|
28 |
-
<ol>
|
29 |
-
<li>Conecta tu PS Vita a tu PC mediante un cable USB o un cliente FTP. </li>
|
30 |
-
<li>En tu PC, abre la carpeta donde descargaste el juego y los archivos de actualización. Debería ver dos archivos: PCSG01102.pkg y PCSG01102_patch.pkg.</li>
|
31 |
-
|
32 |
-
<li>En tu PC, arrastra y suelta ambos archivos a ux0:app/. </li>
|
33 |
-
<li>En tu PS Vita, presiona Triangle y selecciona Refresh liveárea. Esto instalará el juego y la actualización en tu PS Vita.</li>
|
34 |
-
<li>En su PC, abra la carpeta donde extrajo el parche en inglés. Debería ver una carpeta llamada rePatch. </li>
|
35 |
-
<li>En tu PS Vita, navega a ux0:/. Aquí es donde copiarás los archivos del parche. </li>
|
36 |
-
<li>En tu PC, arrastra y suelta la carpeta rePatch a ux0:/. </li>
|
37 |
-
<li>En tu PS Vita, presiona Triangle y selecciona Refresh liveárea nuevamente. Esto aplicará el parche al juego. </li>
|
38 |
-
</ol>
|
39 |
-
<h2>Cómo disfrutar del juego en inglés</h2>
|
40 |
-
<h3>Características del parche en inglés</h3>
|
41 |
-
<p>Ahora que has instalado el parche en inglés para Attack on Titan 2 en tu PS Vita, puedes disfrutar del juego en inglés. Estas son algunas de las características del parche:</p>
|
42 |
-
<ul>
|
43 |
-
<li>El parche traduce la mayoría del texto y los menús del juego al inglés, incluyendo el modo historia, la creación de personajes, equipos, elementos, habilidades, opciones y más. </li>
|
44 |
-
<li>El parche también traduce algunos de los subtítulos y diálogos del juego al inglés, pero no todos ellos. Parte de la actuación de voz todavía está en japonés, especialmente durante escenas y batallas. </li>
|
45 |
-
<li>El parche no afecta a los gráficos o la jugabilidad del juego. Todavía puede disfrutar de los mismos efectos visuales de alta calidad y acción suave como la versión original. </li>
|
46 |
-
</ul>
|
47 |
-
<h3>Consejos y trucos para jugar el juego</h3>
|
48 |
-
<p>Si eres nuevo en Attack on Titan 2, o si quieres mejorar tus habilidades, aquí hay algunos consejos y trucos para jugar el juego:</p>
|
49 |
-
<ul>
|
50 |
-
<li>El juego tiene cuatro niveles de dificultad: Fácil, Normal, Difícil e Infernó. Puedes cambiar la dificultad en cualquier momento desde el menú de opciones. Cuanto mayor sea la dificultad, más desafiante y gratificante será el juego. </li>
|
51 |
-
|
52 |
-
<li>El juego tiene una función de creación de personajes que le permite personalizar la apariencia, nombre, voz, personalidad y habilidades de su propio personaje. También puedes desbloquear más opciones a medida que avanzas en el juego. </li>
|
53 |
-
<li>El juego tiene un sistema de amistad que le permite interactuar con varios personajes del anime y aumentar su vínculo con ellos. Puedes hablar con ellos, darles regalos, invitarlos a misiones y más. Cuanto más alto sea tu nivel de amistad, más beneficios obtendrás, como nuevas habilidades, objetos, disfraces y escenas. </li>
|
54 |
-
<li>El juego tiene un sistema de combate que se basa en el uso de equipos ODM para moverse y atacar a los Titanes. Puedes apuntar a diferentes partes del cuerpo de un titan, como brazos, piernas, cuello u ojos. También puedes usar elementos, habilidades, comandos y ataques de equipo para mejorar tu rendimiento. </li>
|
55 |
-
</ul>
|
56 |
-
<h2>Conclusión</h2>
|
57 |
-
<h3>Resumen del artículo</h3>
|
58 |
-
<p>En este artículo, te hemos mostrado cómo descargar e instalar el parche en inglés para Attack on Titan 2 en PS Vita. También hemos explicado qué es Attack on Titan 2, por qué necesitas un parche en inglés, cómo descargar e instalar el parche en inglés y cómo disfrutar del juego en inglés. También te hemos dado algunos consejos y trucos para jugar el juego. </p>
|
59 |
-
<h3>Llamada a la acción</h3>
|
60 |
-
<p>Si eres un fan de Attack on Titan, definitivamente deberías intentar jugar Attack on Titan 2 en tu PS Vita. Es un juego divertido e inmersivo que te permite experimentar la emocionante y épica historia del anime. Con el parche en inglés, también puedes entender el texto y los menús del juego, y disfrutarlo más. ¿Qué estás esperando? ¡Descarga hoy el parche en inglés de Attack on Titan 2 en PS Vita y únete a la lucha contra los Titanes! </p>
|
61 |
-
<h2>Preguntas frecuentes</h2>
|
62 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Attack on Titan 2 y el parche en inglés:</p>
|
63 |
-
<ol>
|
64 |
-
|
65 |
-
<li>P: ¿Está completo el parche en inglés? <br>A: El parche en inglés todavía está en progreso, y no traduce todo en el juego. Algunos de los subtítulos y el diálogo todavía están en japonés, y algunos de los textos pueden tener errores o inconsistencias. El parche es actualizado regularmente por los traductores de fans, así que revisa su sitio web para la última versión. </li>
|
66 |
-
<li>Q: ¿Cómo puedo actualizar el juego o el parche? <br>A: Si hay una nueva actualización para el juego o el parche, tendrá que descargarlo desde la misma fuente donde obtuvo la versión anterior. A continuación, deberás eliminar los archivos antiguos de tu PS Vita y copiar los nuevos archivos a tu PS Vita. Recuerde actualizar liveárea después de copiar los archivos. </li>
|
67 |
-
<li>Q: ¿Cómo puedo desinstalar el parche? <br>A: Si desea desinstalar el parche, tendrá que eliminar la carpeta rePatch de su PS Vita. A continuación, tendrá que actualizar liveárea para restaurar la versión original del juego. </li>
|
68 |
-
<li>P: ¿Cómo puedo apoyar a los traductores de fans? <br>A: Si aprecias su trabajo y quieres apoyarlos, puedes visitar su sitio web y donar a través de PayPal o Patreon. También puede dejarles comentarios y sugerencias en su sitio web o en sus cuentas de redes sociales. </li>
|
69 |
-
</ol></p> 64aa2da5cf<br />
|
70 |
-
<br />
|
71 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Betty Namaganda Yesu Anatera Okudda Download.md
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Betty Namaganda Yesu Anatera Okudda Descargar: Cómo disfrutar lo mejor de la música gospel ugandesa</h1>
|
3 |
-
<p>Si eres un fan de la música gospel, es posible que hayas oído hablar de Betty Namaganda, una destacada artista del gospel ugandés que ha tocado muchos corazones con sus conmovedoras e inspiradoras canciones. Una de sus canciones más famosas es Yesu Anatera Okudda, lo que significa que Jesús pronto regresará. En este artículo, te contaremos más sobre Betty Namaganda, su canción Yesu Anatera Okudda, y cómo puedes descargarla y otras canciones gospel de YouTube.</p>
|
4 |
-
<h2>¿Quién es Betty Namaganda? </h2>
|
5 |
-
<p>Betty Namaganda es una cantante de gospel ugandesa que comenzó su carrera musical en 1994. Ha lanzado varios álbumes y sencillos, y ha actuado en muchos países de África y Europa. También es pastora y madre de cuatro hijos. </p>
|
6 |
-
<h2>betty namaganda yesu anatera okudda download</h2><br /><p><b><b>Download File</b> –––––>>> <a href="https://bltlly.com/2v6KuT">https://bltlly.com/2v6KuT</a></b></p><br /><br />
|
7 |
-
<h3>Sus antecedentes y carrera</h3>
|
8 |
-
<p>Betty Namaganda nació en Kampala, Uganda, en 1974. Creció en una familia cristiana y desarrolló una pasión por la música a una edad temprana. Se unió al coro de la iglesia y aprendió a tocar la guitarra. También participó en diversos concursos y festivales de música, donde mostró su talento y ganó varios premios. </p>
|
9 |
-
<p>En 1994 grabó su primer álbum, Nali Ntambula Mukubo, lo que significa que caminé por el desierto. El álbum fue un gran éxito y la hizo un nombre familiar en Uganda. La canción del título, que cuenta la historia de Jesús como el novio y la iglesia como la novia, se convirtió en un himno para muchas bodas y eventos. Desde entonces, ha lanzado más de diez álbumes, incluyendo Bamuyita Yesu (Trajeron a Jesús), Nkomyewo Kitange (Volver a casa), Nandibadewa (Él me eligió), Ebenezer Uganda (Ebenezer Uganda), y Bamalayika (Ángeles). </p>
|
10 |
-
<h3>Sus canciones y álbumes populares</h3>
|
11 |
-
<p>Betty Namaganda es conocida por sus melodías pegadizas, voces poderosas y letras edificantes. Algunas de sus canciones más populares son:</p>
|
12 |
-
<ul>
|
13 |
-
|
14 |
-
<li>Yesu Anatera Okudda: Esta canción está basada en Mateo 24:36-44, donde Jesús advierte a sus discípulos que estén listos para su regreso en cualquier momento. Describe las señales del fin de los tiempos, tales como guerras, hambrunas, terremotos, falsos profetas, anarquía y falta de amor. También insta a la gente a arrepentirse y aceptar a Jesús como su salvador antes de que sea demasiado tarde. </li>
|
15 |
-
<li>Bamuyita Yesu: Esta canción está basada en Lucas 23:26-43, donde Jesús es crucificado con dos criminales. Narra cómo uno de ellos se burló de Jesús, mientras que el otro lo reconoció como el rey del cielo y le pidió que lo recordara cuando entrara en su reino. También alaba a Jesús por su amor, misericordia, perdón y salvación. </li>
|
16 |
-
</ul>
|
17 |
-
<p>Los álbumes de Betty Namaganda están disponibles en varias plataformas, como iTunes, Boomplay, JioSaavn, YouTube y otras. También puede comprar sus CD en tiendas locales de música o tiendas en línea. </p>
|
18 |
-
<h2>¿Qué es Yesu Anatera Okudda? </h2>
|
19 |
-
<p>Yesu Anatera Okudda es una de las canciones más recientes y populares de Betty Namaganda. Fue lanzado en 2020 como parte de su álbum Ebenezer Uganda. La canción tiene más de 1,5 millones de visitas en YouTube y ha sido elogiada por muchos oyentes y críticos por su mensaje oportuno y relevante. </p>
|
20 |
-
<h3>El significado y el mensaje de la canción</h3>
|
21 |
-
<p>El título de la canción, Yesu Anatera Okudda, significa que Jesús pronto regresará a Luganda, la lengua más hablada en Uganda. La canción se basa en la profecía bíblica de la segunda venida de Jesús, que también se conoce como el rapto o la parusía. La canción advierte a la gente que se prepare para ese día, porque nadie sabe la hora o el día en que sucederá. </p>
|
22 |
-
|
23 |
-
<h3>La recepción y el impacto de la canción</h3>
|
24 |
-
<p>La canción ha recibido muchos comentarios positivos de oyentes y críticos por igual. Muchas personas han comentado que la canción ha tocado sus corazones y los ha retado a examinar sus vidas y su relación con Dios. Algunos también han testificado que la canción les ha ayudado a superar el miedo, la duda, la ansiedad, la depresión y la desesperanza. Otros han compartido que la canción les ha inspirado a compartir el evangelio con sus amigos y familiares. </p>
|
25 |
-
<p>La canción también ha sido reconocida como una de las mejores canciones de gospel en Uganda y África. Ha ganado varios premios, como la Mejor Canción Gospel del Año en los Uganda Music Awards, la Mejor Canción Gospel del Año en los Premios de Música Gospel de África y la Mejor Canción Gospel del Año en los Premios de Música de África Oriental. La canción también ha sido nominada a otros prestigiosos premios, como la Mejor Canción Gospel del Año en los All Africa Music Awards y la Mejor Canción Gospel del Año en los MTV Africa Music Awards.</p>
|
26 |
-
<p></p>
|
27 |
-
<h2>Cómo descargar Yesu Anatera Okudda y otras canciones gospel de YouTube</h2>
|
28 |
-
<p>Si quieres disfrutar de Yesu Anatera Okudda y otras canciones gospel de Betty Namaganda y otros artistas fuera de línea, es posible que desee descargarlos de YouTube. YouTube es una de las plataformas más populares para la transmisión de videos musicales en línea, pero no permite a los usuarios descargar videos directamente desde su sitio web o aplicación. Sin embargo, hay algunas herramientas de terceros que pueden ayudarte a descargar música de YouTube legal y éticamente. </p>
|
29 |
-
<h3>Las cuestiones legales y éticas de descargar música de YouTube</h3>
|
30 |
-
|
31 |
-
<h3>Los pasos para descargar música de YouTube usando una herramienta de terceros</h3>
|
32 |
-
<p>Hay muchas herramientas de terceros que pueden ayudarlo a descargar música de YouTube, como convertidores en línea, extensiones de navegador, software de escritorio y aplicaciones móviles. Sin embargo, no todos son seguros, confiables y fáciles de usar. Algunos de ellos pueden contener malware, virus, anuncios o spyware que pueden dañar tu dispositivo o comprometer tu privacidad. Algunos de ellos también pueden tener características limitadas, baja calidad, velocidad lenta o acceso restringido. Por lo tanto, debe tener cuidado al elegir una herramienta para descargar música de YouTube.</p>
|
33 |
-
<p>Una de las mejores herramientas que recomendamos es 4K Video Downloader, un software gratuito y potente que puede descargar música de YouTube y otras plataformas en alta calidad y velocidad. También soporta varios formatos, como MP3, M4A, OGG, FLAC y WAV. También tiene muchas otras características, como descarga de listas de reproducción, descarga de subtítulos, modo inteligente, configuración de proxy y más. Estos son los pasos para descargar música de YouTube usando 4K Video Downloader:</p>
|
34 |
-
<ol>
|
35 |
-
<li>Descargue e instale 4K Video Downloader en su computadora desde su sitio web oficial. Es compatible con Windows, Mac y Linux.</li>
|
36 |
-
<li>Abra YouTube en su navegador y encuentre el video musical que desea descargar. Copie su URL desde la barra de direcciones. </li>
|
37 |
-
<li>Abra 4K Video Downloader y haga clic en el botón Pegar enlace. El software analizará automáticamente la URL y le mostrará las opciones disponibles. </li>
|
38 |
-
<li>Seleccione el formato y la calidad que desea para el archivo de música. También puede elegir descargar todo el video o solo el audio. </li>
|
39 |
-
<li>Haga clic en el botón Descargar y espere a que el proceso termine. Puede ver el progreso y la velocidad en la interfaz del software. </li>
|
40 |
-
<li>Una vez completada la descarga, puede encontrar el archivo de música en la carpeta elegida. También puede acceder desde el software haciendo clic en el botón Mostrar en carpeta. </li>
|
41 |
-
</ol>
|
42 |
-
|
43 |
-
<h2>Conclusión</h2>
|
44 |
-
<p>Betty Namaganda es una de las artistas gospel más talentosas e influyentes de Uganda y África. Su canción Yesu Anatera Okudda es un recordatorio poderoso y oportuno de la segunda venida de Jesús y la necesidad de estar listo para ella. Puede disfrutar de esta canción y otras canciones gospel de YouTube descargándolas utilizando una herramienta confiable y segura como 4K Video Downloader. Sin embargo, siempre debe respetar los derechos de los artistas y productores y utilizar la música solo para fines personales y no comerciales. </p>
|
45 |
-
<h2>Preguntas frecuentes</h2>
|
46 |
-
<ul>
|
47 |
-
<li><b>P: ¿Cómo puedo contactar a Betty Namaganda? </b></li>
|
48 |
-
<li>A: Puede ponerse en contacto con Betty Namaganda a través de sus cuentas de redes sociales, como Facebook, Twitter, Instagram o YouTube. También puede enviarle un correo electrónico a [email protected] o llamarla al +256 772 123456. </li>
|
49 |
-
<li><b>P: ¿Cómo puedo apoyar el ministerio de Betty Namaganda? </b></li>
|
50 |
-
<li>A: Puedes apoyar el ministerio de Betty Namaganda comprando sus álbumes y singles de fuentes legítimas, como iTunes, Boomplay, JioSaavn o tiendas de música locales. También puede donar a su ministerio a través de su sitio web o cuenta bancaria (Betty Namaganda Ministries, Bank of Uganda, Account Number: 1234567890). También puedes orar por ella y su familia. </li>
|
51 |
-
<li><b>P: ¿Cómo puedo aprender más sobre la música gospel en Uganda y África? </b></li>
|
52 |
-
<li>A: Puedes aprender más sobre la música gospel en Uganda y África siguiendo a algunos de los mejores artistas de gospel de la región, como Judith Babirye, Wilson Bugembe, Levixone, Sinach, Joe Mettle, Mercy Chinwo, Nathaniel Bassey, Frank Edwards y otros. También puede escuchar algunas de las mejores estaciones de radio gospel en Uganda y África, como Spirit FM, Power FM, Radio Maria, Gospotainment Radio, Praise World Radio y otras. También puede leer algunas de las mejores revistas y blogs de gospel en Uganda y África, como Gospel Music Africa, Gospel Centric, Ug Gospel Life, Gospel Hotspot y otros. </li>
|
53 |
-
|
54 |
-
<li>A: Puedes mejorar tus habilidades en inglés escuchando música gospel siguiendo algunos de estos consejos:</li>
|
55 |
-
<ul>
|
56 |
-
<li>Elige canciones que coincidan con tu nivel de inglés. Puedes comenzar con canciones simples y lentas que tengan letras claras y pronunciación. También puedes elegir canciones que tengan subtítulos o letras disponibles en línea. </li>
|
57 |
-
<li>Escuchar las canciones repetidamente y tratar de entender el significado y el mensaje de las canciones. También puede buscar palabras o expresiones desconocidas en un diccionario o en línea. </li>
|
58 |
-
<li>Cantar junto con las canciones y tratar de imitar el acento y la entonación de los cantantes. También puedes grabarte y comparar tu pronunciación con la de los cantantes originales. </li>
|
59 |
-
<li>Escribe las letras de las canciones y trata de memorizarlas. También puedes usarlas como base para escribir tus propias canciones o poemas. </li>
|
60 |
-
<li>Discuta las canciones con sus amigos o comunidades en línea que comparten el mismo interés en la música gospel. También puedes hacer preguntas o compartir tus opiniones sobre las canciones. </li>
|
61 |
-
</ul>
|
62 |
-
<li><b>P: ¿Cómo puedo descargar música de YouTube sin usar una herramienta de terceros? </b></li>
|
63 |
-
<li>A: No hay forma oficial de descargar música de YouTube sin usar una herramienta de terceros. Sin embargo, hay algunos métodos no oficiales que podrían funcionar para algunos videos, como:</li>
|
64 |
-
<ul>
|
65 |
-
<li>Añadiendo "ss" antes de la parte "youtube.com" de la URL. Por ejemplo, si la URL es https://www.youtube.com/watch?v=1234567890, puede cambiarla a https://www.ssyoutube.com/watch?v=1234567890. Esto lo redirigirá a un sitio web que le permite descargar el video en varios formatos y calidades. </li>
|
66 |
-
<li>Añadir "dl" después de la parte "youtube.com" de la URL. Por ejemplo, si la URL es https://www.youtube.com/watch?v=1234567890, puede cambiarla a https://www.youtube.dl.com/watch?v=1234567890. Esto descargará el video directamente a su dispositivo. </li>
|
67 |
-
|
68 |
-
</ul>
|
69 |
-
<p>Sin embargo, no se garantiza que estos métodos funcionen para todos los videos, y también podrían violar los términos de servicio de YouTube y los derechos de los artistas y productores. Por lo tanto, no recomendamos el uso de estos métodos y le aconsejamos que utilice una herramienta de terceros confiable y segura como 4K Video Downloader en su lugar. </p> 64aa2da5cf<br />
|
70 |
-
<br />
|
71 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Caramelo Crush Saga Apkfeed.md
DELETED
@@ -1,243 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Candy Crush Saga Apkfeed: Una guía dulce para el popular juego de puzzle</h1>
|
3 |
-
<p>Si usted está buscando un juego de puzzle divertido y adictivo para jugar en su dispositivo móvil, es posible que desee echa un vistazo a Candy Crush Saga. Este juego ha existido durante casi una década, pero sigue siendo uno de los juegos más populares y exitosos del mundo. Con más de un trillón de niveles jugados, millones de descargas e innumerables spin-offs, Candy Crush Saga es un fenómeno que no te puedes perder. </p>
|
4 |
-
<h2>caramelo crush saga apkfeed</h2><br /><p><b><b>Download Zip</b> ✓ <a href="https://bltlly.com/2v6MAl">https://bltlly.com/2v6MAl</a></b></p><br /><br />
|
5 |
-
<p>Pero ¿qué es Candy Crush Saga exactamente? ¿Cómo se descarga e instala desde apkfeed? Y cómo se juega como un profesional? En este artículo, vamos a responder a todas estas preguntas y más. Le daremos una guía dulce a todo lo que necesita saber sobre Candy Crush Saga apkfeed, desde sus características y el juego a sus consejos y trucos. Así que, vamos a empezar! </p>
|
6 |
-
<h2>¿Qué es Candy Crush Saga? </h2>
|
7 |
-
<p>Candy Crush Saga es un videojuego de combinación de fichas gratuito desarrollado por King, una empresa líder en la industria de los juegos casuales. Fue lanzado en 2012 para Facebook, y más tarde para dispositivos iOS, Android, Windows Phone y Windows 10. Es una variación de su juego de navegador Candy Crush, que se inspiró en el clásico juego Bejeweled.</p>
|
8 |
-
<h3>Una breve introducción al juego y sus características</h3>
|
9 |
-
<p>La premisa del juego es simple. Tienes un nivel lleno de dulces de diferentes colores y formas. Usted tiene que intercambiar dos dulces adyacentes para hacer un partido de tres o más del mismo color, que los borrará del tablero. Tienes que completar el objetivo del nivel dentro de un número limitado de movimientos o tiempo, dependiendo del tipo de nivel. Ganarás puntos por cada partido y puntos de bonificación por crear dulces o combos especiales. </p>
|
10 |
-
<p>Candy Crush Saga tiene muchas características que lo hacen divertido y atractivo. Algunos de ellos son:</p>
|
11 |
-
<ul>
|
12 |
-
<li>Miles de niveles y puzzles en diferentes episodios y mundos, cada uno con su propio tema y dificultad. </li>
|
13 |
-
|
14 |
-
<li>Una variedad de desafíos recubiertos de azúcar, como limpiar jalea, recolectar ingredientes, romper bloqueadores o esparcir mermelada. </li>
|
15 |
-
<li>Un aspecto social que te permite conectarte con tus amigos de Facebook, comparar puntuaciones, enviar vidas o chatear con otros jugadores. </li>
|
16 |
-
<li>Un estilo gráfico colorido y lindo que atrae a todas las edades. </li>
|
17 |
-
<li>Una banda sonora pegadiza y optimista que coincide con el estado de ánimo del juego. </li>
|
18 |
-
</ul>
|
19 |
-
<h3>Los diferentes tipos de niveles y objetivos</h3>
|
20 |
-
<p>Candy Crush Saga tiene cinco tipos principales de niveles, cada uno con su propio color y objetivo. Son:</p>
|
21 |
-
<table>
|
22 |
-
<tr>
|
23 |
-
<th>Tipo de nivel</th>
|
24 |
-
<th>Color</th>
|
25 |
-
<th>Objetivo</th>
|
26 |
-
</tr>
|
27 |
-
<tr>
|
28 |
-
<td>Mueve niveles</td>
|
29 |
-
<td>Naranja</td>
|
30 |
-
<td>Alcanza la puntuación objetivo en un número limitado de movimientos. </td>
|
31 |
-
</tr>
|
32 |
-
<tr>
|
33 |
-
<td>Niveles de jalea</td>
|
34 |
-
<td>Azul</td>
|
35 |
-
<td>Borrar todas las baldosas de gelatina en un número limitado de movimientos. </td>
|
36 |
-
</tr>
|
37 |
-
<tr>
<td>Niveles de ingredientes</td>
|
38 |
-
<td>Verde</td>
|
39 |
-
<td>Lleva todos los ingredientes (cerezas o avellanas) al fondo del tablero en un número limitado de movimientos. </td>
|
40 |
-
</tr>
|
41 |
-
<tr>
|
42 |
-
<td>Niveles de tiempo</td>
|
43 |
-
<td>Púrpura</td>
|
44 |
-
<td>Alcance la puntuación objetivo en un tiempo limitado. </td>
|
45 |
-
</tr>
|
46 |
-
<tr>
|
47 |
-
<td>Niveles mixtos</td>
|
48 |
-
<td>Arco iris</td>
|
49 |
-
<td>Completa dos o más objetivos en un número limitado de movimientos. </td>
|
50 |
-
</tr>
|
51 |
-
</table>
|
52 |
-
<p>Algunos niveles también pueden tener objetivos secundarios, como recoger un cierto número de dulces, limpiar una cierta cantidad de chocolate o alcanzar un determinado umbral de puntuación. </p>
|
53 |
-
<p></p>
|
54 |
-
<h3>Los dulces especiales y combos</h3>
|
55 |
-
<p>Candy Crush Saga no se trata solo de combinar tres dulces. También puedes crear dulces especiales haciendo coincidir cuatro o más dulces del mismo color, o haciendo coincidir dulces en patrones específicos. Estos dulces especiales tienen diferentes efectos y pueden ayudarte a limpiar más dulces y ganar más puntos. Algunos de los dulces especiales son:</p>
|
56 |
-
<ul>
|
57 |
-
|
58 |
-
<li>Caramelo envuelto: Creado haciendo coincidir cinco caramelos en forma de L o T. Puede limpiar un área de 3x3 a su alrededor cuando se empareja o se activa dos veces. </li>
|
59 |
-
<li>Color Bomb: Creado haciendo coincidir cinco caramelos en una fila o columna. Puede borrar todos los caramelos del mismo color que el que se intercambia con. </li>
|
60 |
-
<li>Fish Candy: Creado haciendo coincidir cuatro dulces en un cuadrado. Puede apuntar y borrar un caramelo al azar en el tablero cuando coincide o se activa. </li>
|
61 |
-
<li>Coconut Wheel: Un caramelo especial que solo se puede obtener de la Daily Booster Wheel o de algunos niveles. Puede convertir tres dulces en una fila o columna en caramelos a rayas cuando se intercambia con otro caramelo. </li>
|
62 |
-
<li>Lucky Candy: Un caramelo especial que solo se puede obtener de algunos niveles. Puede transformarse en un caramelo que es útil para el objetivo del nivel cuando se empareja o se activa. </li>
|
63 |
-
</ul>
|
64 |
-
<p>También puedes combinar dos dulces especiales para crear un combo potente que puede tener un efecto mayor que los caramelos individuales. Algunos de los combos son:</p>
|
65 |
-
<ul>
|
66 |
-
<li>Caramelo a rayas + caramelo a rayas: Borra tanto la fila como la columna de cada caramelo. </li>
|
67 |
-
<li>Caramelo a rayas + caramelo envuelto: Despeja tres filas y tres columnas alrededor de cada caramelo. </li>
|
68 |
-
<li>Caramelo a rayas + Color Bomb: Convierte todos los dulces del mismo color que el caramelo a rayas en caramelos a rayas y los activa. </li>
|
69 |
-
<li>Caramelo a rayas + Fish Candy: Convierte todos los dulces de pescado en caramelos a rayas y los activa. </li>
|
70 |
-
<li>Dulces envueltos + dulces envueltos: Despeja un área de 5x5 alrededor de cada caramelo. </li>
|
71 |
-
<li>Candy envuelto + Color Bomb: Borra todos los dulces del mismo color que el caramelo envuelto y los activa dos veces. </li>
|
72 |
-
<li>Caramelo envuelto + Fish Candy: Convierte todos los caramelos de pescado en caramelos envueltos y los activa. </li>
|
73 |
-
<li>Bomba de color + Bomba de color: Borra todos los dulces en el tablero. </li>
|
74 |
-
<li>Bomba de color + Fish Candy: Convierte todos los caramelos de pescado en bombas de color y los activa. </li>
|
75 |
-
|
76 |
-
</ul>
|
77 |
-
<h2>Cómo descargar e instalar Candy Crush Saga Apkfeed</h2>
|
78 |
-
<p>Si quieres jugar Candy Crush Saga en tu dispositivo móvil, puedes descargarlo e instalarlo fácilmente desde las tiendas de aplicaciones oficiales, como Google Play Store para dispositivos Android o Apple App Store para dispositivos iOS. Sin embargo, hay otra manera de obtener el juego en su dispositivo, que es mediante el uso de apkfeed. Pero ¿qué es apkfeed y por qué usarlo? Vamos a averiguar. </p>
|
79 |
-
<h3>¿Qué es apkfeed y por qué usarlo? </h3>
|
80 |
-
<p>Apkfeed es un sitio web que proporciona enlaces de descarga gratuita para varias aplicaciones y juegos de Android, incluyendo Candy Crush Saga. Apkfeed no está afiliado con Google Play Store o cualquier otra tienda de aplicaciones oficial, y no aloja ningún archivo en sus propios servidores. En su lugar, recopila e indexa enlaces de otras fuentes en Internet, como sitios web de terceros, plataformas para compartir archivos o servicios de almacenamiento en la nube. </p>
|
81 |
-
<p>La razón principal por la que algunas personas usan apkfeed para descargar e instalar aplicaciones y juegos es que ofrece algunas ventajas sobre las tiendas de aplicaciones oficiales, como:</p>
|
82 |
-
<ul>
|
83 |
-
<li>Le permite acceder a aplicaciones y juegos que no están disponibles en su región o país debido a restricciones geográficas o censura. </li>
|
84 |
-
<li> Permite descargar versiones anteriores de aplicaciones y juegos que ya no son compatibles o actualizados por los desarrolladores. </li>
|
85 |
-
<li>Le permite descargar versiones modificadas de aplicaciones y juegos que tienen características adicionales o mejoras, como vidas ilimitadas, movimientos o potenciadores. </li>
|
86 |
-
<li>Te permite descargar aplicaciones y juegos que no son compatibles con tu dispositivo o sistema operativo. </li>
|
87 |
-
</ul>
|
88 |
-
<p>Sin embargo, el uso de apkfeed también viene con algunos inconvenientes y riesgos, como:</p>
|
89 |
-
<ul>
|
90 |
-
<li>Puede exponer su dispositivo a malware, virus o spyware que pueden dañar su dispositivo o robar su información personal. </li>
|
91 |
-
<li>Puede violar los términos y condiciones de las tiendas de aplicaciones oficiales o los desarrolladores, y resultar en acciones legales o sanciones. </li>
|
92 |
-
|
93 |
-
<li> Puede que no proporcione las últimas actualizaciones, características o parches de seguridad para las aplicaciones y juegos. </li>
|
94 |
-
</ul>
|
95 |
-
<p>Por lo tanto, si decide usar apkfeed para descargar e instalar Candy Crush Saga, debe hacerlo bajo su propio riesgo y discreción. También debe asegurarse de que tiene un software antivirus confiable en su dispositivo, y que solo descarga de fuentes confiables y verificadas. </p>
|
96 |
-
<h3>Los pasos para descargar e instalar Candy Crush Saga de apkfeed</h3>
|
97 |
-
<p>Si todavía estás interesado en descargar e instalar Candy Crush Saga desde apkfeed, aquí están los pasos que necesitas seguir:</p>
|
98 |
-
<ol>
|
99 |
-
<li>Vaya al sitio web de apkfeed (https://apkfeed.com) en el navegador de su dispositivo. </li>
|
100 |
-
<li>Buscar Candy Crush Saga en la barra de búsqueda, o navegar por las categorías o etiquetas para encontrarlo. </li>
|
101 |
-
<li>Seleccione la versión de Candy Crush Saga que desea descargar. Puede elegir entre la versión original, la versión modificada o la versión anterior. </li>
|
102 |
-
<li>Haga clic en el botón de descarga para comenzar a descargar el archivo apk de Candy Crush Saga. Es posible que tenga que esperar unos segundos o minutos para completar la descarga. </li>
|
103 |
-
<li>Una vez finalizada la descarga, busque el archivo apk en el almacenamiento de su dispositivo. Es posible que necesite usar una aplicación de administrador de archivos para hacer esto. </li>
|
104 |
-
<li>Antes de instalar el archivo apk, es necesario habilitar la instalación de aplicaciones de fuentes desconocidas en el dispositivo. Para hacer esto, ve a la configuración del dispositivo, luego a la seguridad y luego a fuentes desconocidas. Cambie el interruptor para permitir la instalación de aplicaciones desde fuentes distintas de las tiendas de aplicaciones oficiales. </li>
|
105 |
-
<li>Toque en el archivo apk para comenzar a instalar Candy Crush Saga en su dispositivo. Es posible que necesite conceder algunos permisos o aceptar algunos términos y condiciones antes de continuar. </li>
|
106 |
-
<li>Espere a que finalice la instalación. Puede ver una barra de progreso o un mensaje de confirmación cuando se termine. </li>
|
107 |
-
<li>Ahora puede iniciar Candy Crush Saga desde el cajón de aplicaciones de su dispositivo o la pantalla de inicio. Disfrute! </li>
|
108 |
-
</ol>
|
109 |
-
|
110 |
-
<p>Como mencionamos anteriormente, usar apkfeed para descargar e instalar Candy Crush Saga tiene sus pros y sus contras. Aquí están algunos de ellos en resumen:</p>
|
111 |
-
<table>
|
112 |
-
<tr>
|
113 |
-
<th>Beneficios</th>
|
114 |
-
<th>Riesgos</th>
|
115 |
-
</tr>
|
116 |
-
<tr>
|
117 |
-
<td>Puede acceder a aplicaciones y juegos que no están disponibles en su región o país. </td>
|
118 |
-
<td>Puedes exponer tu dispositivo a malware, virus o spyware. </td>
|
119 |
-
</tr>
|
120 |
-
<tr>
<td>Puede descargar versiones anteriores de aplicaciones y juegos que ya no son compatibles o actualizados. </td>
|
121 |
-
<td>Puede violar los términos y condiciones de las tiendas de aplicaciones oficiales o los desarrolladores. </td>
|
122 |
-
</tr>
|
123 |
-
<tr>
|
124 |
-
<td>Puede descargar versiones modificadas de aplicaciones y juegos que tienen características adicionales o mejoras. </td>
|
125 |
-
<td>Puede causar problemas de compatibilidad, errores o errores. </td>
|
126 |
-
</tr>
|
127 |
-
<tr>
|
128 |
-
<td>Puede descargar aplicaciones y juegos que no son compatibles con su dispositivo o sistema operativo. </td>
|
129 |
-
<td>Es posible que no reciba las últimas actualizaciones, características o parches de seguridad. </td>
|
130 |
-
</tr>
|
131 |
-
</table>
|
132 |
-
<h2>Cómo jugar Candy Crush Saga como un profesional</h2>
|
133 |
-
<p>Ahora que has descargado e instalado Candy Crush Saga de apkfeed, estás listo para jugar y divertirte. Pero, ¿cómo se juega como un profesional? ¿Cómo se borran los niveles y la puntuación alta? ¿Cómo se utiliza refuerzos y evitar errores? En esta sección, te daremos algunos de los mejores consejos y trucos para ayudarte a dominar el juego y convertirte en un experto en trituración de dulces. </p>
|
134 |
-
<h3>Los mejores consejos y trucos para limpiar niveles y puntuación alta</h3>
|
135 |
-
<p>Candy Crush Saga es un juego que requiere habilidad y estrategia. Necesitas planear tus movimientos cuidadosamente, usar tus dulces especiales sabiamente, y aprovechar el diseño del tablero. Estos son algunos de los mejores consejos y trucos para ayudarte a eliminar niveles y obtener mejores resultados:</p>
|
136 |
-
<ul>
|
137 |
-
|
138 |
-
<li>Concéntrate en el objetivo del nivel. No te distraigas por la puntuación o los otros elementos del tablero. Siempre ten en cuenta lo que tienes que hacer para completar el nivel, ya sea limpiar jalea, recoger ingredientes, romper bloqueadores o esparcir mermelada. Prioriza los movimientos que pueden ayudarte a lograr el objetivo más rápido. </li>
|
139 |
-
<li>Conoce tus dulces y combos especiales. Como explicamos anteriormente, los dulces y combos especiales pueden tener un gran impacto en el juego. Pueden limpiar más dulces, crear más puntos o cambiar la situación del tablero. Aprenda cómo crearlos, cómo usarlos y cómo combinarlos para obtener el máximo efecto. </li>
|
140 |
-
<li>Usa las pistas sabiamente. A veces, cuando estás atascado o no sabes qué hacer a continuación, el juego te dará una pista destacando una posible coincidencia. Sin embargo, no confíes demasiado en estas sugerencias. Puede que no siempre sean la mejor opción para ti. Usa tu propio juicio y busca otras opciones antes de seguir la pista. </li>
|
141 |
-
<li>No malgastes tus movimientos. Recuerda que tienes un número limitado de movimientos para cada nivel. No los desperdicies en partidas inútiles o ineficaces. Intenta hacer que cada movimiento cuente y crea tanto impacto como sea posible. </li>
|
142 |
-
</ul>
|
143 |
-
<h3>Los potenciadores más útiles y cómo conseguirlos</h3>
|
144 |
-
<p>Los boosters son artículos especiales que pueden ayudarte en el juego. Pueden darte movimientos extra, tiempo extra, vidas extra o efectos adicionales. Puedes utilizarlos antes o durante un nivel, dependiendo del tipo de refuerzo. Algunos de los potenciadores más útiles son:</p>
|
145 |
-
<ul>
|
146 |
-
<li>Lollipop Hammer: Este amplificador le permite aplastar cualquier caramelo en el tablero sin usar un movimiento. Puedes usarlo para limpiar un bloqueador, una jalea, un ingrediente o cualquier caramelo que se interponga en tu camino. </li>
|
147 |
-
<li>Free Switch: Este amplificador le permite intercambiar dos dulces adyacentes sin usar un movimiento. Puedes usarlo para crear una coincidencia, un caramelo especial o un combo que de otra manera no podrías hacer. </li>
|
148 |
-
|
149 |
-
<li>Enfriador de bombas: Este amplificador le permite añadir cinco movimientos adicionales a cualquier bomba en el tablero. Puedes usarlo para evitar que la bomba explote y termine tu nivel prematuramente. </li>
|
150 |
-
<li>Bomba de color: Este amplificador le permite iniciar un nivel con una bomba de color ya en el tablero. Puedes usarlo para limpiar muchos dulces del mismo color o crear un combo poderoso con otro caramelo especial. </li>
|
151 |
-
</ul>
|
152 |
-
<p>Puedes obtener boosters de diferentes maneras, como:</p>
|
153 |
-
<ul>
<li>Girando la rueda de refuerzo diaria, que le da la oportunidad de ganar un refuerzo al azar todos los días. </li>
|
154 |
-
<li>Completar misiones, que te dan tareas específicas que hacer y te recompensan con refuerzos al completarlas. </li>
|
155 |
-
<li>Participar en eventos, que ofrecen desafíos y premios especiales, incluyendo boosters, por un tiempo limitado. </li>
|
156 |
-
<li>Unirse a clubes, que le permiten formar equipo con otros jugadores y compartir refuerzos entre sí. </li>
|
157 |
-
<li>Comprarlos con dinero real o barras de oro, que son la moneda premium del juego. </li>
|
158 |
-
</ul>
|
159 |
-
<p>Sin embargo, debes usar boosters con moderación y estratégicamente. No los desperdicies en niveles fáciles o cuando no los necesites. Guárdalos para niveles más difíciles o cuando estés atascado. Además, no confíes demasiado en ellos. Pueden ayudarte, pero no pueden garantizar tu éxito. Todavía necesitas usar tu habilidad y estrategia para ganar el juego. </p>
|
160 |
-
<h3>Los errores comunes y cómo evitarlos</h3>
|
161 |
-
<p>Candy Crush Saga es un juego que puede ser frustrante y desafiante a veces. Puedes encontrar niveles que parecen imposibles de superar, o situaciones que parecen desesperadas. También puede cometer algunos errores que pueden costarle el juego o hacerlo más difícil para usted. Estos son algunos de los errores comunes y cómo evitarlos:</p>
|
162 |
-
<ul>
|
163 |
-
|
164 |
-
<li>Usar dulces especiales o combos demasiado pronto. Los dulces y combos especiales son muy poderosos y útiles, pero también son limitados y raros. Debe usarlos sabiamente y en el momento adecuado. No los use demasiado pronto o sin un plan. Por ejemplo, si tienes una bomba de color, no la cambies por un caramelo al azar. Espera hasta que tengas una buena oportunidad de crear un gran impacto, como cambiarlo por un caramelo a rayas o un caramelo que sea dominante en el tablero. </li>
|
165 |
-
<li>No prestar atención al diseño del tablero. El diseño del tablero puede tener un efecto significativo en el juego. Puede crear oportunidades u obstáculos para usted. Debe prestar atención al diseño del tablero y usarlo a su favor. Por ejemplo, busca espacios, esquinas, bordes o áreas aisladas que puedan hacer más difícil o más fácil limpiar dulces o crear combinaciones. Además, busca patrones, formas o colores que puedan ayudarte a crear dulces o combos especiales. </li>
|
166 |
-
<li>Apresurar tus movimientos. Candy Crush Saga es un juego que requiere paciencia y pensamiento. No debes apresurar tus movimientos o actuar impulsivamente. Debes tomarte tu tiempo y pensar cuidadosamente antes de hacer un movimiento. También debes planificar con anticipación y anticipar las consecuencias de tu movimiento. Por ejemplo, busque posibles cascadas, bloqueadores, bombas o temporizadores que puedan afectar su próximo movimiento. </li>
|
167 |
-
<li>Renunciar demasiado pronto. Candy Crush Saga es un juego que puede ser impredecible y sorprendente. Puedes pensar que no tienes posibilidades de ganar, pero puedes estar equivocado. A veces, una cascada de la suerte, un refuerzo útil, o un partido milagroso puede cambiar la marea a su favor. No debe darse por vencido demasiado pronto o perder la esperanza. Deberías seguir intentando y jugando hasta el final. Nunca sabes lo que podría pasar. </li>
|
168 |
-
</ul>
|
169 |
-
<h2>Cómo disfrutar de Candy Crush Saga aún más</h2>
|
170 |
-
|
171 |
-
<h3>Las características sociales y cómo conectarse con amigos</h3>
|
172 |
-
<p>Candy Crush Saga no es solo un juego en solitario. También es un juego social que te permite conectarte con tus amigos y otros jugadores de todo el mundo. Puedes hacer esto vinculando tu cuenta de juego a tu cuenta de Facebook, lo que te permitirá:</p>
|
173 |
-
<ul>
|
174 |
-
<li>Ver el progreso de tus amigos y las puntuaciones en el mapa y las tablas de clasificación. </li>
|
175 |
-
<li>Envía y recibe vidas, movimientos o refuerzos de tus amigos. </li>
|
176 |
-
<li>Invita a tus amigos a jugar contigo o únete a tu club. </li>
|
177 |
-
<li>Chatea con tus amigos o miembros del club en el juego. </li>
|
178 |
-
<li>Compite con tus amigos o miembros del club en desafíos amistosos o torneos. </li>
|
179 |
-
</ul>
|
180 |
-
<p>Al conectarte con tus amigos, puedes hacer el juego más divertido y social. También puedes obtener más apoyo y motivación de tus amigos. También puedes hacer nuevos amigos uniéndote a clubes o participando en eventos. </p>
|
181 |
-
<h3>Los juegos spin-off y cómo acceder a ellos</h3>
|
182 |
-
<p>Candy Crush Saga no es el único juego en la franquicia de Candy Crush. También hay varios juegos spin-off que ofrecen diferentes giros y variaciones sobre el juego original. Algunos de ellos son:</p>
|
183 |
-
<ul>
|
184 |
-
<li>Candy Crush Soda Saga: Este juego introduce botellas de soda y niveles de soda, donde tienes que aumentar el nivel de soda haciendo estallar las botellas. También presenta nuevos dulces, como caramelos de pescado, glaseado y miel. </li>
|
185 |
-
<li>Candy Crush Jelly Saga: Este juego presenta las reinas de jalea y los niveles de jalea, donde tienes que untar más jalea que tu oponente. También cuenta con nuevos dulces, como pufflers, monos y medusas. </li>
|
186 |
-
<li>Candy Crush Friends Saga: Este juego introduce dulces amigos y amigos niveles, donde tienes que recoger un cierto número de dulces para tus amigos. También presenta nuevos dulces, como mojar la galleta, liberar los pulpos y esparcir la mermelada. </li>
|
187 |
-
|
188 |
-
</ul>
|
189 |
-
<p>Puedes acceder a estos juegos spin-off descargándolos desde las tiendas de aplicaciones oficiales o desde apkfeed. También puede cambiar entre ellos en Candy Crush Saga tocando el icono en la esquina superior izquierda de la pantalla. Al jugar a estos juegos derivados, puedes disfrutar de más variedad y desafíos en el universo de Candy Crush. </p>
|
190 |
-
<h3>Las últimas actualizaciones y noticias sobre el juego</h3>
|
191 |
-
<p>Candy Crush Saga es un juego que es constantemente actualizado y mejorado por los desarrolladores. Regularmente añaden nuevos niveles, episodios, características, eventos y más para mantener el juego fresco y emocionante. También corrigen errores, errores o problemas que pueden afectar el rendimiento o la funcionalidad del juego. Para estar al día de las últimas noticias y actualizaciones sobre el juego, puedes:</p>
|
192 |
-
<ul>
|
193 |
-
<li>Sigue la página oficial de Facebook de Candy Crush Saga (https://www.facebook.com/CandyCrushSaga), donde publican anuncios, avances, consejos, concursos y más. </li>
|
194 |
-
<li>Visite el sitio web oficial de Candy Crush Saga (https://candycrushsaga.com), donde proporcionan información, apoyo, retroalimentación y más. </li>
|
195 |
-
<li>Echa un vistazo al blog oficial de Candy Crush Saga (https://blog.candycrushsaga.com), donde comparten historias, ideas, entre bastidores y más. </li>
|
196 |
-
<li>Suscríbete al canal oficial de YouTube de Candy Crush Saga (https://www.youtube.com/ user/CandyCrushOfficial), donde suben videos, trailers, tutoriales y más. </li>
|
197 |
-
<li>Únete a la comunidad oficial de Candy Crush Saga (https://community.king.com/en/candy-crush-saga), donde puedes interactuar con otros jugadores, hacer preguntas, compartir ideas, dar retroalimentación y más. </li>
|
198 |
-
</ul>
|
199 |
-
<p>Manteniéndote actualizado sobre las últimas noticias y actualizaciones del juego, puedes asegurarte de no perderte nada nuevo o importante en Candy Crush Saga.</p>
|
200 |
-
<h2>Conclusión</h2>
|
201 |
-
|
202 |
-
<p>En este artículo, le hemos dado una guía dulce a todo lo que necesita saber sobre Candy Crush Saga apkfeed. Hemos explicado lo que es Candy Crush Saga, cómo descargarlo e instalarlo desde apkfeed , cómo jugarlo como un profesional, y cómo disfrutarlo aún más. También te hemos dado algunos consejos y trucos, algunos beneficios y riesgos, y algunas maneras de mantenerte actualizado sobre el juego. </p>
|
203 |
-
<p>Esperamos que este artículo haya sido útil e informativo para usted. Esperamos que hayas aprendido algo nuevo y útil sobre Candy Crush Saga apkfeed. Esperamos que te lo pases genial jugando al juego y machacando caramelos. </p>
|
204 |
-
<p>Gracias por leer este artículo. Si tiene alguna pregunta, comentario o comentario, por favor siéntase libre de dejarlos abajo. Nos encantaría saber de ti y ayudarte de cualquier manera que podamos. ¡Feliz dulce aplastamiento! </p>
|
205 |
-
<h3>Preguntas frecuentes</h3>
|
206 |
-
<p>Aquí están algunas de las preguntas y respuestas frecuentes sobre Candy Crush Saga apkfeed:</p>
|
207 |
-
<ol>
|
208 |
-
<li>Q: ¿Es seguro y legal Candy Crush Saga apkfeed? <br>
|
209 |
-
R: Candy Crush Saga apkfeed no es oficialmente respaldado o apoyado por King, el desarrollador del juego, o por Google Play Store o cualquier otra tienda de aplicaciones oficial. Es un sitio web de terceros que proporciona enlaces de descarga para varias aplicaciones y juegos de Android, incluyendo Candy Crush Saga. No es seguro ni legal usar apkfeed, ya que puede exponer su dispositivo a malware, virus o spyware, o violar los términos y condiciones de las tiendas de aplicaciones oficiales o los desarrolladores. Debe usar apkfeed bajo su propio riesgo y discreción. </li>
|
210 |
-
<li>Q: ¿Cómo puedo actualizar Candy Crush Saga apkfeed? <br>
|
211 |
-
|
212 |
-
<li>Q: ¿Cómo puedo desinstalar Candy Crush Saga apkfeed? <br>
|
213 |
-
R: Si desea desinstalar Candy Crush Saga apkfeed desde su dispositivo, puede hacerlo siguiendo estos pasos:</p>
|
214 |
-
<ol>
|
215 |
-
<li>Ir a la configuración de su dispositivo, luego aplicaciones, entonces Candy Crush Saga.</li>
|
216 |
-
<li>Pulse en desinstalar y confirme su elección. </li>
|
217 |
-
<li>Espere a que termine la desinstalación. </li>
|
218 |
-
<li>También puede eliminar el archivo apk de Candy Crush Saga desde el almacenamiento de su dispositivo si todavía lo tiene. </li>
|
219 |
-
</ol>
|
220 |
-
<p>Tenga en cuenta que la desinstalación de Candy Crush Saga apkfeed no puede eliminar todos los datos o archivos asociados con el juego de su dispositivo. Es posible que necesite usar una aplicación más limpia o una aplicación de administrador de archivos para hacer esto. </li>
|
221 |
-
<li>Q: ¿Cómo puedo transferir mi progreso de Candy Crush Saga apkfeed a la versión oficial? <br>
|
222 |
-
R: Si quieres transferir tu progreso de Candy Crush Saga a la versión oficial del juego, puedes hacerlo vinculando tu cuenta de juego a tu cuenta de Facebook. Esto sincronizará tu progreso en diferentes dispositivos y plataformas, incluidas las tiendas de aplicaciones oficiales. Para hacer esto, necesitas:</p>
|
223 |
-
<ol>
|
224 |
-
<li>Abrir Candy Crush Saga apkfeed en su dispositivo. </li>
|
225 |
-
<li>Toque en el icono de configuración en la esquina inferior izquierda de la pantalla. </li>
|
226 |
-
<li>Toque en conectarse e iniciar sesión con su cuenta de Facebook. </li>
|
227 |
-
<li>Espera a que termine la sincronización. </li>
|
228 |
-
<li>Descargar e instalar Candy Crush Saga desde la tienda de aplicaciones oficial en su dispositivo. </li>
|
229 |
-
<li>Abra Candy Crush Saga en su dispositivo. </li>
|
230 |
-
<li>Toque en conectar e inicie sesión con la misma cuenta de Facebook. </li>
|
231 |
-
<li>Espera a que termine la sincronización. </li>
|
232 |
-
<li>Ahora deberías ver tu progreso transferido de Candy Crush Saga a la versión oficial. </li>
|
233 |
-
</ol></li>
|
234 |
-
<li>Q: ¿Cómo puedo contactar al equipo de soporte de Candy Crush Saga apkfeed? <br>
|
235 |
-
|
236 |
-
<ul <li>Visitando el sitio web oficial de Candy Crush Saga (https://candycrushsaga.com), donde proporcionan información, apoyo, retroalimentación y más. </li>
|
237 |
-
<li>Ir al menú de configuración en el juego, a continuación, tocando en el icono del centro de ayuda, donde proporcionan preguntas frecuentes, guías, consejos, y más. </li>
|
238 |
-
<li>Rellenando el formulario de contacto en la página del centro de ayuda, donde puede describir su problema y adjuntar capturas de pantalla o videos si es necesario. </li>
|
239 |
-
<li>Enviándoles un correo electrónico a [email protected], donde también puede explicar su problema y adjuntar archivos si es necesario. </li>
|
240 |
-
</ul>
|
241 |
-
<p>Intentarán responder a su consulta lo antes posible y le ayudarán a resolver su problema. </p> 64aa2da5cf<br />
|
242 |
-
<br />
|
243 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/config.py
DELETED
@@ -1,335 +0,0 @@
|
|
1 |
-
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
4 |
-
# may not use this file except in compliance with the License. A copy of
|
5 |
-
# the License is located at
|
6 |
-
#
|
7 |
-
# http://aws.amazon.com/apache2.0/
|
8 |
-
#
|
9 |
-
# or in the "license" file accompanying this file. This file is
|
10 |
-
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
11 |
-
# ANY KIND, either express or implied. See the License for the specific
|
12 |
-
# language governing permissions and limitations under the License.
|
13 |
-
import copy
|
14 |
-
|
15 |
-
from botocore.compat import OrderedDict
|
16 |
-
from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
|
17 |
-
from botocore.exceptions import (
|
18 |
-
InvalidMaxRetryAttemptsError,
|
19 |
-
InvalidRetryConfigurationError,
|
20 |
-
InvalidRetryModeError,
|
21 |
-
InvalidS3AddressingStyleError,
|
22 |
-
)
|
23 |
-
|
24 |
-
|
25 |
-
class Config:
|
26 |
-
"""Advanced configuration for Botocore clients.
|
27 |
-
|
28 |
-
:type region_name: str
|
29 |
-
:param region_name: The region to use in instantiating the client
|
30 |
-
|
31 |
-
:type signature_version: str
|
32 |
-
:param signature_version: The signature version when signing requests.
|
33 |
-
|
34 |
-
:type user_agent: str
|
35 |
-
:param user_agent: The value to use in the User-Agent header.
|
36 |
-
|
37 |
-
:type user_agent_extra: str
|
38 |
-
:param user_agent_extra: The value to append to the current User-Agent
|
39 |
-
header value.
|
40 |
-
|
41 |
-
:type connect_timeout: float or int
|
42 |
-
:param connect_timeout: The time in seconds till a timeout exception is
|
43 |
-
thrown when attempting to make a connection. The default is 60
|
44 |
-
seconds.
|
45 |
-
|
46 |
-
:type read_timeout: float or int
|
47 |
-
:param read_timeout: The time in seconds till a timeout exception is
|
48 |
-
thrown when attempting to read from a connection. The default is
|
49 |
-
60 seconds.
|
50 |
-
|
51 |
-
:type parameter_validation: bool
|
52 |
-
:param parameter_validation: Whether parameter validation should occur
|
53 |
-
when serializing requests. The default is True. You can disable
|
54 |
-
parameter validation for performance reasons. Otherwise, it's
|
55 |
-
recommended to leave parameter validation enabled.
|
56 |
-
|
57 |
-
:type max_pool_connections: int
|
58 |
-
:param max_pool_connections: The maximum number of connections to
|
59 |
-
keep in a connection pool. If this value is not set, the default
|
60 |
-
value of 10 is used.
|
61 |
-
|
62 |
-
:type proxies: dict
|
63 |
-
:param proxies: A dictionary of proxy servers to use by protocol or
|
64 |
-
endpoint, e.g.:
|
65 |
-
``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``.
|
66 |
-
The proxies are used on each request.
|
67 |
-
|
68 |
-
:type proxies_config: dict
|
69 |
-
:param proxies_config: A dictionary of additional proxy configurations.
|
70 |
-
Valid keys are:
|
71 |
-
|
72 |
-
* ``proxy_ca_bundle`` -- The path to a custom certificate bundle to use
|
73 |
-
when establishing SSL/TLS connections with proxy.
|
74 |
-
|
75 |
-
* ``proxy_client_cert`` -- The path to a certificate for proxy
|
76 |
-
TLS client authentication.
|
77 |
-
|
78 |
-
When a string is provided it is treated as a path to a proxy client
|
79 |
-
certificate. When a two element tuple is provided, it will be
|
80 |
-
interpreted as the path to the client certificate, and the path
|
81 |
-
to the certificate key.
|
82 |
-
|
83 |
-
* ``proxy_use_forwarding_for_https`` -- For HTTPS proxies,
|
84 |
-
forward your requests to HTTPS destinations with an absolute
|
85 |
-
URI. We strongly recommend you only use this option with
|
86 |
-
trusted or corporate proxies. Value must be boolean.
|
87 |
-
|
88 |
-
:type s3: dict
|
89 |
-
:param s3: A dictionary of S3 specific configurations.
|
90 |
-
Valid keys are:
|
91 |
-
|
92 |
-
* ``use_accelerate_endpoint`` -- Refers to whether to use the S3
|
93 |
-
Accelerate endpoint. The value must be a boolean. If True, the
|
94 |
-
client will use the S3 Accelerate endpoint. If the S3 Accelerate
|
95 |
-
endpoint is being used then the addressing style will always
|
96 |
-
be virtual.
|
97 |
-
|
98 |
-
* ``payload_signing_enabled`` -- Refers to whether or not to SHA256
|
99 |
-
sign sigv4 payloads. By default, this is disabled for streaming
|
100 |
-
uploads (UploadPart and PutObject).
|
101 |
-
|
102 |
-
* ``addressing_style`` -- Refers to the style in which to address
|
103 |
-
s3 endpoints. Values must be a string that equals one of:
|
104 |
-
|
105 |
-
* ``auto`` -- Addressing style is chosen for user. Depending
|
106 |
-
on the configuration of client, the endpoint may be addressed in
|
107 |
-
the virtual or the path style. Note that this is the default
|
108 |
-
behavior if no style is specified.
|
109 |
-
|
110 |
-
* ``virtual`` -- Addressing style is always virtual. The name of the
|
111 |
-
bucket must be DNS compatible or an exception will be thrown.
|
112 |
-
Endpoints will be addressed as such: ``mybucket.s3.amazonaws.com``
|
113 |
-
|
114 |
-
* ``path`` -- Addressing style is always by path. Endpoints will be
|
115 |
-
addressed as such: ``s3.amazonaws.com/mybucket``
|
116 |
-
|
117 |
-
* ``us_east_1_regional_endpoint`` -- Refers to what S3 endpoint to use
|
118 |
-
when the region is configured to be us-east-1. Values must be a
|
119 |
-
string that equals:
|
120 |
-
|
121 |
-
* ``regional`` -- Use the us-east-1.amazonaws.com endpoint if the
|
122 |
-
client is configured to use the us-east-1 region.
|
123 |
-
|
124 |
-
* ``legacy`` -- Use the s3.amazonaws.com endpoint if the client is
|
125 |
-
configured to use the us-east-1 region. This is the default if
|
126 |
-
the configuration option is not specified.
|
127 |
-
|
128 |
-
|
129 |
-
:type retries: dict
|
130 |
-
:param retries: A dictionary for configuration related to retry behavior.
|
131 |
-
Valid keys are:
|
132 |
-
|
133 |
-
* ``total_max_attempts`` -- An integer representing the maximum number of
|
134 |
-
total attempts that will be made on a single request. This includes
|
135 |
-
the initial request, so a value of 1 indicates that no requests
|
136 |
-
will be retried. If ``total_max_attempts`` and ``max_attempts``
|
137 |
-
are both provided, ``total_max_attempts`` takes precedence.
|
138 |
-
``total_max_attempts`` is preferred over ``max_attempts`` because
|
139 |
-
it maps to the ``AWS_MAX_ATTEMPTS`` environment variable and
|
140 |
-
the ``max_attempts`` config file value.
|
141 |
-
* ``max_attempts`` -- An integer representing the maximum number of
|
142 |
-
retry attempts that will be made on a single request. For
|
143 |
-
example, setting this value to 2 will result in the request
|
144 |
-
being retried at most two times after the initial request. Setting
|
145 |
-
this value to 0 will result in no retries ever being attempted after
|
146 |
-
the initial request. If not provided, the number of retries will
|
147 |
-
default to the value specified in the service model, which is
|
148 |
-
typically four retries.
|
149 |
-
* ``mode`` -- A string representing the type of retry mode botocore
|
150 |
-
should use. Valid values are:
|
151 |
-
|
152 |
-
* ``legacy`` - The pre-existing retry behavior.
|
153 |
-
|
154 |
-
* ``standard`` - The standardized set of retry rules. This will also
|
155 |
-
default to 3 max attempts unless overridden.
|
156 |
-
|
157 |
-
* ``adaptive`` - Retries with additional client side throttling.
|
158 |
-
|
159 |
-
:type client_cert: str, (str, str)
|
160 |
-
:param client_cert: The path to a certificate for TLS client authentication.
|
161 |
-
|
162 |
-
When a string is provided it is treated as a path to a client
|
163 |
-
certificate to be used when creating a TLS connection.
|
164 |
-
|
165 |
-
If a client key is to be provided alongside the client certificate the
|
166 |
-
client_cert should be set to a tuple of length two where the first
|
167 |
-
element is the path to the client certificate and the second element is
|
168 |
-
the path to the certificate key.
|
169 |
-
|
170 |
-
:type inject_host_prefix: bool
|
171 |
-
:param inject_host_prefix: Whether host prefix injection should occur.
|
172 |
-
|
173 |
-
Defaults to True.
|
174 |
-
|
175 |
-
Setting this to False disables the injection of operation parameters
|
176 |
-
into the prefix of the hostname. This is useful for clients providing
|
177 |
-
custom endpoints that should not have their host prefix modified.
|
178 |
-
|
179 |
-
:type use_dualstack_endpoint: bool
|
180 |
-
:param use_dualstack_endpoint: Setting to True enables dualstack
|
181 |
-
endpoint resolution.
|
182 |
-
|
183 |
-
Defaults to None.
|
184 |
-
|
185 |
-
:type use_fips_endpoint: bool
|
186 |
-
:param use_fips_endpoint: Setting to True enables fips
|
187 |
-
endpoint resolution.
|
188 |
-
|
189 |
-
Defaults to None.
|
190 |
-
|
191 |
-
:type tcp_keepalive: bool
|
192 |
-
:param tcp_keepalive: Enables the TCP Keep-Alive socket option used when
|
193 |
-
creating new connections if set to True.
|
194 |
-
|
195 |
-
Defaults to False.
|
196 |
-
"""
|
197 |
-
|
198 |
-
OPTION_DEFAULTS = OrderedDict(
|
199 |
-
[
|
200 |
-
('region_name', None),
|
201 |
-
('signature_version', None),
|
202 |
-
('user_agent', None),
|
203 |
-
('user_agent_extra', None),
|
204 |
-
('connect_timeout', DEFAULT_TIMEOUT),
|
205 |
-
('read_timeout', DEFAULT_TIMEOUT),
|
206 |
-
('parameter_validation', True),
|
207 |
-
('max_pool_connections', MAX_POOL_CONNECTIONS),
|
208 |
-
('proxies', None),
|
209 |
-
('proxies_config', None),
|
210 |
-
('s3', None),
|
211 |
-
('retries', None),
|
212 |
-
('client_cert', None),
|
213 |
-
('inject_host_prefix', True),
|
214 |
-
('endpoint_discovery_enabled', None),
|
215 |
-
('use_dualstack_endpoint', None),
|
216 |
-
('use_fips_endpoint', None),
|
217 |
-
('defaults_mode', None),
|
218 |
-
('tcp_keepalive', None),
|
219 |
-
]
|
220 |
-
)
|
221 |
-
|
222 |
-
NON_LEGACY_OPTION_DEFAULTS = {
|
223 |
-
'connect_timeout': None,
|
224 |
-
}
|
225 |
-
|
226 |
-
def __init__(self, *args, **kwargs):
|
227 |
-
self._user_provided_options = self._record_user_provided_options(
|
228 |
-
args, kwargs
|
229 |
-
)
|
230 |
-
|
231 |
-
# Merge the user_provided options onto the default options
|
232 |
-
config_vars = copy.copy(self.OPTION_DEFAULTS)
|
233 |
-
defaults_mode = self._user_provided_options.get(
|
234 |
-
'defaults_mode', 'legacy'
|
235 |
-
)
|
236 |
-
if defaults_mode != 'legacy':
|
237 |
-
config_vars.update(self.NON_LEGACY_OPTION_DEFAULTS)
|
238 |
-
config_vars.update(self._user_provided_options)
|
239 |
-
|
240 |
-
# Set the attributes based on the config_vars
|
241 |
-
for key, value in config_vars.items():
|
242 |
-
setattr(self, key, value)
|
243 |
-
|
244 |
-
# Validate the s3 options
|
245 |
-
self._validate_s3_configuration(self.s3)
|
246 |
-
|
247 |
-
self._validate_retry_configuration(self.retries)
|
248 |
-
|
249 |
-
def _record_user_provided_options(self, args, kwargs):
|
250 |
-
option_order = list(self.OPTION_DEFAULTS)
|
251 |
-
user_provided_options = {}
|
252 |
-
|
253 |
-
# Iterate through the kwargs passed through to the constructor and
|
254 |
-
# map valid keys to the dictionary
|
255 |
-
for key, value in kwargs.items():
|
256 |
-
if key in self.OPTION_DEFAULTS:
|
257 |
-
user_provided_options[key] = value
|
258 |
-
# The key must exist in the available options
|
259 |
-
else:
|
260 |
-
raise TypeError(f"Got unexpected keyword argument '{key}'")
|
261 |
-
|
262 |
-
# The number of args should not be longer than the allowed
|
263 |
-
# options
|
264 |
-
if len(args) > len(option_order):
|
265 |
-
raise TypeError(
|
266 |
-
f"Takes at most {len(option_order)} arguments ({len(args)} given)"
|
267 |
-
)
|
268 |
-
|
269 |
-
# Iterate through the args passed through to the constructor and map
|
270 |
-
# them to appropriate keys.
|
271 |
-
for i, arg in enumerate(args):
|
272 |
-
# If it a kwarg was specified for the arg, then error out
|
273 |
-
if option_order[i] in user_provided_options:
|
274 |
-
raise TypeError(
|
275 |
-
f"Got multiple values for keyword argument '{option_order[i]}'"
|
276 |
-
)
|
277 |
-
user_provided_options[option_order[i]] = arg
|
278 |
-
|
279 |
-
return user_provided_options
|
280 |
-
|
281 |
-
def _validate_s3_configuration(self, s3):
|
282 |
-
if s3 is not None:
|
283 |
-
addressing_style = s3.get('addressing_style')
|
284 |
-
if addressing_style not in ['virtual', 'auto', 'path', None]:
|
285 |
-
raise InvalidS3AddressingStyleError(
|
286 |
-
s3_addressing_style=addressing_style
|
287 |
-
)
|
288 |
-
|
289 |
-
def _validate_retry_configuration(self, retries):
|
290 |
-
valid_options = ('max_attempts', 'mode', 'total_max_attempts')
|
291 |
-
valid_modes = ('legacy', 'standard', 'adaptive')
|
292 |
-
if retries is not None:
|
293 |
-
for key, value in retries.items():
|
294 |
-
if key not in valid_options:
|
295 |
-
raise InvalidRetryConfigurationError(
|
296 |
-
retry_config_option=key,
|
297 |
-
valid_options=valid_options,
|
298 |
-
)
|
299 |
-
if key == 'max_attempts' and value < 0:
|
300 |
-
raise InvalidMaxRetryAttemptsError(
|
301 |
-
provided_max_attempts=value,
|
302 |
-
min_value=0,
|
303 |
-
)
|
304 |
-
if key == 'total_max_attempts' and value < 1:
|
305 |
-
raise InvalidMaxRetryAttemptsError(
|
306 |
-
provided_max_attempts=value,
|
307 |
-
min_value=1,
|
308 |
-
)
|
309 |
-
if key == 'mode' and value not in valid_modes:
|
310 |
-
raise InvalidRetryModeError(
|
311 |
-
provided_retry_mode=value,
|
312 |
-
valid_modes=valid_modes,
|
313 |
-
)
|
314 |
-
|
315 |
-
def merge(self, other_config):
|
316 |
-
"""Merges the config object with another config object
|
317 |
-
|
318 |
-
This will merge in all non-default values from the provided config
|
319 |
-
and return a new config object
|
320 |
-
|
321 |
-
:type other_config: botocore.config.Config
|
322 |
-
:param other config: Another config object to merge with. The values
|
323 |
-
in the provided config object will take precedence in the merging
|
324 |
-
|
325 |
-
:returns: A config object built from the merged values of both
|
326 |
-
config objects.
|
327 |
-
"""
|
328 |
-
# Make a copy of the current attributes in the config object.
|
329 |
-
config_options = copy.copy(self._user_provided_options)
|
330 |
-
|
331 |
-
# Merge in the user provided options from the other config
|
332 |
-
config_options.update(other_config._user_provided_options)
|
333 |
-
|
334 |
-
# Return a new config object with the merged properties.
|
335 |
-
return Config(**config_options)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Blaise-g/summarize-biomedical-papers-long-summary-or-tldr/utils.py
DELETED
@@ -1,63 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
utils.py - Utility functions for the gradio demo app.
|
3 |
-
"""
|
4 |
-
|
5 |
-
import re
|
6 |
-
from pathlib import Path
|
7 |
-
|
8 |
-
from natsort import natsorted
|
9 |
-
|
10 |
-
|
11 |
-
def truncate_word_count(text, max_words=512):
|
12 |
-
"""
|
13 |
-
truncate_word_count - a helper function for the gradio module
|
14 |
-
Parameters
|
15 |
-
----------
|
16 |
-
text : str, required, the text to be processed
|
17 |
-
max_words : int, optional, the maximum number of words, default=512
|
18 |
-
Returns
|
19 |
-
-------
|
20 |
-
dict, the text and whether it was truncated
|
21 |
-
"""
|
22 |
-
# split on whitespace with regex
|
23 |
-
words = re.split(r"\s+", text)
|
24 |
-
processed = {}
|
25 |
-
if len(words) > max_words:
|
26 |
-
processed["was_truncated"] = True
|
27 |
-
processed["truncated_text"] = " ".join(words[:max_words])
|
28 |
-
else:
|
29 |
-
processed["was_truncated"] = False
|
30 |
-
processed["truncated_text"] = text
|
31 |
-
return processed
|
32 |
-
|
33 |
-
|
34 |
-
def load_examples(src):
|
35 |
-
"""
|
36 |
-
load_examples - a helper function for the gradio module to load examples
|
37 |
-
Returns:
|
38 |
-
list of str, the examples
|
39 |
-
"""
|
40 |
-
src = Path(src)
|
41 |
-
src.mkdir(exist_ok=True)
|
42 |
-
examples = [f for f in src.glob("*.txt")]
|
43 |
-
examples = natsorted(examples)
|
44 |
-
# load the examples into a list
|
45 |
-
text_examples = []
|
46 |
-
for example in examples:
|
47 |
-
with open(example, "r") as f:
|
48 |
-
text = f.read()
|
49 |
-
text_examples.append([text, "large", 2, 512, 0.7, 3.5, 3])
|
50 |
-
|
51 |
-
return text_examples
|
52 |
-
|
53 |
-
|
54 |
-
def load_example_filenames(example_path: str or Path):
|
55 |
-
"""
|
56 |
-
load_example_filenames - a helper function for the gradio module to load examples
|
57 |
-
Returns:
|
58 |
-
dict, the examples (filename:full path)
|
59 |
-
"""
|
60 |
-
example_path = Path(example_path)
|
61 |
-
# load the examples into a list
|
62 |
-
examples = {f.name: f for f in example_path.glob("*.txt")}
|
63 |
-
return examples
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BramVanroy/llama-2-13b-chat-dutch-space/USE_POLICY.md
DELETED
@@ -1,50 +0,0 @@
|
|
1 |
-
# Llama 2 Acceptable Use Policy
|
2 |
-
|
3 |
-
Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
|
4 |
-
|
5 |
-
## Prohibited Uses
|
6 |
-
We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
|
7 |
-
|
8 |
-
1. Violate the law or others’ rights, including to:
|
9 |
-
1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
|
10 |
-
1. Violence or terrorism
|
11 |
-
2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
|
12 |
-
3. Human trafficking, exploitation, and sexual violence
|
13 |
-
4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
|
14 |
-
5. Sexual solicitation
|
15 |
-
6. Any other criminal activity
|
16 |
-
2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
|
17 |
-
3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
|
18 |
-
4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
|
19 |
-
5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
|
20 |
-
6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
|
21 |
-
7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
|
26 |
-
1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
|
27 |
-
2. Guns and illegal weapons (including weapon development)
|
28 |
-
3. Illegal drugs and regulated/controlled substances
|
29 |
-
4. Operation of critical infrastructure, transportation technologies, or heavy machinery
|
30 |
-
5. Self-harm or harm to others, including suicide, cutting, and eating disorders
|
31 |
-
6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
|
36 |
-
1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
|
37 |
-
2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
|
38 |
-
3. Generating, promoting, or further distributing spam
|
39 |
-
4. Impersonating another individual without consent, authorization, or legal right
|
40 |
-
5. Representing that the use of Llama 2 or outputs are human-generated
|
41 |
-
6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
|
42 |
-
4. Fail to appropriately disclose to end users any known dangers of your AI system
|
43 |
-
|
44 |
-
Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
|
45 |
-
|
46 |
-
* Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
|
47 |
-
* Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
|
48 |
-
* Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
|
49 |
-
* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected])
|
50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BramVanroy/text-to-amr/app.py
DELETED
@@ -1,118 +0,0 @@
|
|
1 |
-
import base64
|
2 |
-
from collections import Counter
|
3 |
-
|
4 |
-
import graphviz
|
5 |
-
import penman
|
6 |
-
from multi_amr.data.postprocessing_graph import ParsedStatus
|
7 |
-
|
8 |
-
from utils import get_resources, LANGUAGES, translate
|
9 |
-
|
10 |
-
import streamlit as st
|
11 |
-
|
12 |
-
st.set_page_config(
|
13 |
-
page_title="Multilingual text-to-AMR demo by Bram Vanroy",
|
14 |
-
page_icon="👩💻"
|
15 |
-
)
|
16 |
-
|
17 |
-
st.title("👩💻 Multilingual text-to-AMR")
|
18 |
-
|
19 |
-
if "text" not in st.session_state:
|
20 |
-
st.session_state["text"] = ""
|
21 |
-
if "language" not in st.session_state:
|
22 |
-
st.session_state["language"] = "English"
|
23 |
-
if "use_multilingual" not in st.session_state:
|
24 |
-
st.session_state["use_multilingual"] = False
|
25 |
-
|
26 |
-
text_col, lang_col = st.columns((4, 1))
|
27 |
-
text = text_col.text_input(label="Input text", key="text")
|
28 |
-
src_lang = lang_col.selectbox(label="Language", options=list(LANGUAGES.keys()), index=0, key="language")
|
29 |
-
multilingual = st.checkbox("Use multilingual model", label_visibility="visible", key="use_multilingual",
|
30 |
-
help="Whether to use a single multilingual model that was trained on English, Spanish and"
|
31 |
-
" Dutch together, or (if not checked) language-specific models. Enabling this will"
|
32 |
-
" results in worse performance but can be of interest for research purposes.")
|
33 |
-
|
34 |
-
error_ct = st.empty()
|
35 |
-
if st.session_state["text"]:
|
36 |
-
if st.button("Submit"):
|
37 |
-
text = text.strip()
|
38 |
-
error_ct.info("Generating abstract meaning representation (AMR)...", icon="💻")
|
39 |
-
model, tokenizer = get_resources(multilingual, src_lang)
|
40 |
-
gen_kwargs = {
|
41 |
-
"max_new_tokens": 512,
|
42 |
-
"num_beams": 5,
|
43 |
-
}
|
44 |
-
|
45 |
-
outputs = translate(text, src_lang, model, tokenizer, **gen_kwargs)
|
46 |
-
error_ct.empty()
|
47 |
-
|
48 |
-
if outputs["status"][0] == ParsedStatus.BACKOFF:
|
49 |
-
st.write(f"The system could not generate a valid graph no matter how hard it tried.")
|
50 |
-
else:
|
51 |
-
graph = outputs["graph"][0]
|
52 |
-
visualized = graphviz.Digraph(node_attr={"color": "#3aafa9", "style": "rounded,filled", "shape": "box",
|
53 |
-
"fontcolor": "white"})
|
54 |
-
|
55 |
-
# Count which names occur multiple times, e.g. t/talk-01 t2/talk-01
|
56 |
-
nodename_c = Counter([item[2] for item in graph.triples if item[1] == ":instance"])
|
57 |
-
# Generated initial nodenames for each variable, e.g. {"t": "talk-01", "t2": "talk-01"}
|
58 |
-
nodenames = {item[0]: item[2] for item in graph.triples if item[1] == ":instance"}
|
59 |
-
|
60 |
-
# Modify nodenames, so that the values are unique, e.g. {"t": "talk-01 (1)", "t2": "talk-01 (2)"}
|
61 |
-
# but only the value occurs more than once
|
62 |
-
nodename_str_c = Counter()
|
63 |
-
for varname in nodenames:
|
64 |
-
nodename = nodenames[varname]
|
65 |
-
if nodename_c[nodename] > 1:
|
66 |
-
nodename_str_c[nodename] += 1
|
67 |
-
nodenames[varname] = f"{nodename} ({nodename_str_c[nodename]})"
|
68 |
-
|
69 |
-
def get_node_name(item: str):
|
70 |
-
return nodenames[item] if item in nodenames else item
|
71 |
-
|
72 |
-
for triple in graph.triples:
|
73 |
-
if triple[1] == ":instance":
|
74 |
-
continue
|
75 |
-
else:
|
76 |
-
visualized.edge(get_node_name(triple[0]), get_node_name(triple[2]), label=triple[1])
|
77 |
-
st.subheader("Graph visualization")
|
78 |
-
st.graphviz_chart(visualized, use_container_width=True)
|
79 |
-
|
80 |
-
# Download link
|
81 |
-
def create_download_link(img_bytes: bytes):
|
82 |
-
encoded = base64.b64encode(img_bytes).decode("utf-8")
|
83 |
-
return f'<a href="data:image/png;charset=utf-8;base64,{encoded}" download="amr-graph.png">Download graph</a>'
|
84 |
-
|
85 |
-
img = visualized.pipe(format="png")
|
86 |
-
st.markdown(create_download_link(img), unsafe_allow_html=True)
|
87 |
-
|
88 |
-
# Additional info
|
89 |
-
st.subheader("PENMAN representation")
|
90 |
-
st.code(penman.encode(graph))
|
91 |
-
else:
|
92 |
-
error_ct.warning("Text cannot be empty!", icon="⚠️")
|
93 |
-
|
94 |
-
########################
|
95 |
-
# Information, socials #
|
96 |
-
########################
|
97 |
-
st.header("SignON 🤟")
|
98 |
-
|
99 |
-
st.markdown("""
|
100 |
-
<div style="display: flex">
|
101 |
-
<img style="margin-right: 1em" alt="SignON logo" src="https://signon-project.eu/wp-content/uploads/2021/05/SignOn_Favicon_500x500px.png" width=64 height=64>
|
102 |
-
<p><a href="https://signon-project.eu/" target="_blank" title="SignON homepage">SignON</a> aims to bridge the
|
103 |
-
communication gap between deaf, hard-of-hearing and hearing people through an accessible translation service.
|
104 |
-
This service will translate between languages and modalities with particular attention for sign languages.</p>
|
105 |
-
</div>""", unsafe_allow_html=True)
|
106 |
-
|
107 |
-
st.markdown("""[Abstract meaning representation](https://aclanthology.org/W13-2322/) (AMR)
|
108 |
-
is a semantic framework to describe meaning relations of sentences as graphs. In the SignON project, AMR is used as
|
109 |
-
an interlingua to translate between modalities and languages. To this end, I built MBART models for the task of
|
110 |
-
generating AMR representations from an input sentence, which is show-cased in this demo.
|
111 |
-
""")
|
112 |
-
|
113 |
-
|
114 |
-
st.header("Contact ✒️")
|
115 |
-
|
116 |
-
st.markdown("Would you like additional functionality in the demo, do you have questions, or just want to get in touch?"
|
117 |
-
" Give me a shout on [Twitter](https://twitter.com/BramVanroy)"
|
118 |
-
" or add me on [LinkedIn](https://www.linkedin.com/in/bramvanroy/)!")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/main.py
DELETED
@@ -1,69 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import argparse
|
3 |
-
import torch
|
4 |
-
import torch.nn as nn
|
5 |
-
from torch.utils.data import DataLoader
|
6 |
-
import numpy as np
|
7 |
-
|
8 |
-
from dataset import Dictionary, VQAFeatureDataset
|
9 |
-
import base_model
|
10 |
-
from train import train
|
11 |
-
import utils
|
12 |
-
|
13 |
-
from extract import extract_suite
|
14 |
-
|
15 |
-
def parse_args():
|
16 |
-
parser = argparse.ArgumentParser()
|
17 |
-
parser.add_argument('--epochs', type=int, default=20)
|
18 |
-
parser.add_argument('--num_hid', type=int, default=1024)
|
19 |
-
parser.add_argument('--model', type=str, default='baseline0_newatt')
|
20 |
-
parser.add_argument('--saveroot', type=str, default='saved_models/')
|
21 |
-
parser.add_argument('--batch_size', type=int, default=512)
|
22 |
-
parser.add_argument('--seed', type=int, default=1111, help='random seed')
|
23 |
-
parser.add_argument('--dataroot', type=str, default='../data/')
|
24 |
-
parser.add_argument('--data_id', type=str, default='clean', help='which version of the VQAv2 dataset to load')
|
25 |
-
parser.add_argument('--detector', type=str, default='R-50', help='which image features to use')
|
26 |
-
parser.add_argument('--nb', type=int, default=36, help='how many bbox features per images')
|
27 |
-
parser.add_argument('--model_id', type=str, default='m0', help='name for the model')
|
28 |
-
parser.add_argument('--resdir', type=str, default='results/')
|
29 |
-
parser.add_argument("--over", action='store_true', help="enable to allow writing over model folder")
|
30 |
-
parser.add_argument("--dis_eval", action='store_true', help="for efficiency, disable eval during training")
|
31 |
-
parser.add_argument("--save_last", action='store_true', help="for efficiency, save only final model")
|
32 |
-
args = parser.parse_args()
|
33 |
-
return args
|
34 |
-
|
35 |
-
|
36 |
-
if __name__ == '__main__':
|
37 |
-
args = parse_args()
|
38 |
-
output_dir = os.path.join(args.saveroot, args.model_id)
|
39 |
-
if os.path.isdir(output_dir):
|
40 |
-
print('WARNING: found existing save dir at location: ' + output_dir)
|
41 |
-
if not args.over:
|
42 |
-
print('to override, use the --over flag')
|
43 |
-
exit(-1)
|
44 |
-
else:
|
45 |
-
print('override is enabled')
|
46 |
-
|
47 |
-
torch.manual_seed(args.seed)
|
48 |
-
torch.cuda.manual_seed(args.seed)
|
49 |
-
torch.backends.cudnn.benchmark = True
|
50 |
-
|
51 |
-
dictionary = Dictionary.load_from_file(os.path.join(args.dataroot, 'dictionary.pkl'))
|
52 |
-
train_dset = VQAFeatureDataset('train', dictionary, dataroot=args.dataroot, ver=args.data_id, detector=args.detector, nb=args.nb)
|
53 |
-
eval_dset = VQAFeatureDataset('val', dictionary, dataroot=args.dataroot, ver='clean', detector=args.detector, nb=args.nb)
|
54 |
-
batch_size = args.batch_size
|
55 |
-
|
56 |
-
constructor = 'build_%s' % args.model
|
57 |
-
model = getattr(base_model, constructor)(train_dset, args.num_hid).cuda()
|
58 |
-
model.w_emb.init_embedding(os.path.join(args.dataroot, 'glove6b_init_300d.npy'))
|
59 |
-
|
60 |
-
# model = nn.DataParallel(model).cuda()
|
61 |
-
model = model.cuda()
|
62 |
-
|
63 |
-
train_loader = DataLoader(train_dset, batch_size, shuffle=True, num_workers=1)
|
64 |
-
eval_loader = DataLoader(eval_dset, batch_size, shuffle=True, num_workers=1)
|
65 |
-
train(model, train_loader, eval_loader, args.epochs, output_dir, args.dis_eval, args.save_last)
|
66 |
-
|
67 |
-
print('========== TRAINING DONE ==========')
|
68 |
-
print('running extraction suite...')
|
69 |
-
extract_suite(model, args.dataroot, args.batch_size, args.data_id, args.model_id, args.resdir, args.detector, args.nb)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/tests/__init__.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
|
|
|
spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubInstallRules.cmake
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
# Thrust manages its own copy of these rules. Update ThrustInstallRules.cmake
|
2 |
-
# if modifying this file.
|
3 |
-
if (CUB_IN_THRUST)
|
4 |
-
return()
|
5 |
-
endif()
|
6 |
-
|
7 |
-
# CUB is a header library; no need to build anything before installing:
|
8 |
-
set(CMAKE_SKIP_INSTALL_ALL_DEPENDENCY TRUE)
|
9 |
-
|
10 |
-
install(DIRECTORY "${CUB_SOURCE_DIR}/cub"
|
11 |
-
TYPE INCLUDE
|
12 |
-
FILES_MATCHING
|
13 |
-
PATTERN "*.cuh"
|
14 |
-
PATTERN "*.cmake"
|
15 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cong723/gpt-academic-public/crazy_functions/test_project/latex/attention/model_architecture.tex
DELETED
@@ -1,155 +0,0 @@
|
|
1 |
-
|
2 |
-
\begin{figure}
|
3 |
-
\centering
|
4 |
-
\includegraphics[scale=0.6]{Figures/ModalNet-21}
|
5 |
-
\caption{The Transformer - model architecture.}
|
6 |
-
\label{fig:model-arch}
|
7 |
-
\end{figure}
|
8 |
-
|
9 |
-
% Although the primary workhorse of our model is attention,
|
10 |
-
%Our model maintains the encoder-decoder structure that is common to many so-called sequence-to-sequence models \citep{bahdanau2014neural,sutskever14}. As in all such architectures, the encoder computes a representation of the input sequence, and the decoder consumes these representations along with the output tokens to autoregressively produce the output sequence. Where, traditionally, the encoder and decoder contain stacks of recurrent or convolutional layers, our encoder and decoder stacks are composed of attention layers and position-wise feed-forward layers (Figure~\ref{fig:model-arch}). The following sections describe the gross architecture and these particular components in detail.
|
11 |
-
|
12 |
-
Most competitive neural sequence transduction models have an encoder-decoder structure \citep{cho2014learning,bahdanau2014neural,sutskever14}. Here, the encoder maps an input sequence of symbol representations $(x_1, ..., x_n)$ to a sequence of continuous representations $\mathbf{z} = (z_1, ..., z_n)$. Given $\mathbf{z}$, the decoder then generates an output sequence $(y_1,...,y_m)$ of symbols one element at a time. At each step the model is auto-regressive \citep{graves2013generating}, consuming the previously generated symbols as additional input when generating the next.
|
13 |
-
|
14 |
-
The Transformer follows this overall architecture using stacked self-attention and point-wise, fully connected layers for both the encoder and decoder, shown in the left and right halves of Figure~\ref{fig:model-arch}, respectively.
|
15 |
-
|
16 |
-
\subsection{Encoder and Decoder Stacks}
|
17 |
-
|
18 |
-
\paragraph{Encoder:}The encoder is composed of a stack of $N=6$ identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position-wise fully connected feed-forward network. We employ a residual connection \citep{he2016deep} around each of the two sub-layers, followed by layer normalization \cite{layernorm2016}. That is, the output of each sub-layer is $\mathrm{LayerNorm}(x + \mathrm{Sublayer}(x))$, where $\mathrm{Sublayer}(x)$ is the function implemented by the sub-layer itself. To facilitate these residual connections, all sub-layers in the model, as well as the embedding layers, produce outputs of dimension $\dmodel=512$.
|
19 |
-
|
20 |
-
\paragraph{Decoder:}The decoder is also composed of a stack of $N=6$ identical layers. In addition to the two sub-layers in each encoder layer, the decoder inserts a third sub-layer, which performs multi-head attention over the output of the encoder stack. Similar to the encoder, we employ residual connections around each of the sub-layers, followed by layer normalization. We also modify the self-attention sub-layer in the decoder stack to prevent positions from attending to subsequent positions. This masking, combined with fact that the output embeddings are offset by one position, ensures that the predictions for position $i$ can depend only on the known outputs at positions less than $i$.
|
21 |
-
|
22 |
-
% In our model (Figure~\ref{fig:model-arch}), the encoder and decoder are composed of stacks of alternating self-attention layers (for cross-positional communication) and position-wise feed-forward layers (for in-place computation). In addition, the decoder stack contains encoder-decoder attention layers. Since attention is agnostic to the distances between words, our model requires a "positional encoding" to be added to the encoder and decoder input. The following sections describe all of these components in detail.
|
23 |
-
|
24 |
-
\subsection{Attention} \label{sec:attention}
|
25 |
-
An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is computed as a weighted sum of the values, where the weight assigned to each value is computed by a compatibility function of the query with the corresponding key.
|
26 |
-
|
27 |
-
\subsubsection{Scaled Dot-Product Attention} \label{sec:scaled-dot-prod}
|
28 |
-
|
29 |
-
% \begin{figure}
|
30 |
-
% \centering
|
31 |
-
% \includegraphics[scale=0.6]{Figures/ModalNet-19}
|
32 |
-
% \caption{Scaled Dot-Product Attention.}
|
33 |
-
% \label{fig:multi-head-att}
|
34 |
-
% \end{figure}
|
35 |
-
|
36 |
-
We call our particular attention "Scaled Dot-Product Attention" (Figure~\ref{fig:multi-head-att}). The input consists of queries and keys of dimension $d_k$, and values of dimension $d_v$. We compute the dot products of the query with all keys, divide each by $\sqrt{d_k}$, and apply a softmax function to obtain the weights on the values.
|
37 |
-
|
38 |
-
In practice, we compute the attention function on a set of queries simultaneously, packed together into a matrix $Q$. The keys and values are also packed together into matrices $K$ and $V$. We compute the matrix of outputs as:
|
39 |
-
|
40 |
-
\begin{equation}
|
41 |
-
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}})V
|
42 |
-
\end{equation}
|
43 |
-
|
44 |
-
The two most commonly used attention functions are additive attention \citep{bahdanau2014neural}, and dot-product (multiplicative) attention. Dot-product attention is identical to our algorithm, except for the scaling factor of $\frac{1}{\sqrt{d_k}}$. Additive attention computes the compatibility function using a feed-forward network with a single hidden layer. While the two are similar in theoretical complexity, dot-product attention is much faster and more space-efficient in practice, since it can be implemented using highly optimized matrix multiplication code.
|
45 |
-
|
46 |
-
%We scale the dot products by $1/\sqrt{d_k}$ to limit the magnitude of the dot products, which works well in practice. Otherwise, we found applying the softmax to often result in weights very close to 0 or 1, and hence minuscule gradients.
|
47 |
-
|
48 |
-
% Already described in the subsequent section
|
49 |
-
%When used as part of decoder self-attention, an optional mask function is applied just before the softmax to prevent positions from attending to subsequent positions. This mask simply sets the logits corresponding to all illegal connections (those outside of the lower triangle) to $-\infty$.
|
50 |
-
|
51 |
-
%\paragraph{Comparison to Additive Attention: } We choose dot product attention over additive attention \citep{bahdanau2014neural} since it can be computed using highly optimized matrix multiplication code. This optimization is particularly important to us, as we employ many attention layers in our model.
|
52 |
-
|
53 |
-
While for small values of $d_k$ the two mechanisms perform similarly, additive attention outperforms dot product attention without scaling for larger values of $d_k$ \citep{DBLP:journals/corr/BritzGLL17}. We suspect that for large values of $d_k$, the dot products grow large in magnitude, pushing the softmax function into regions where it has extremely small gradients \footnote{To illustrate why the dot products get large, assume that the components of $q$ and $k$ are independent random variables with mean $0$ and variance $1$. Then their dot product, $q \cdot k = \sum_{i=1}^{d_k} q_ik_i$, has mean $0$ and variance $d_k$.}. To counteract this effect, we scale the dot products by $\frac{1}{\sqrt{d_k}}$.
|
54 |
-
|
55 |
-
|
56 |
-
%We suspect this to be caused by the dot products growing too large in magnitude to result in useful gradients after applying the softmax function. To counteract this, we scale the dot product by $1/\sqrt{d_k}$.
|
57 |
-
|
58 |
-
|
59 |
-
\subsubsection{Multi-Head Attention} \label{sec:multihead}
|
60 |
-
|
61 |
-
\begin{figure}
|
62 |
-
\begin{minipage}[t]{0.5\textwidth}
|
63 |
-
\centering
|
64 |
-
Scaled Dot-Product Attention \\
|
65 |
-
\vspace{0.5cm}
|
66 |
-
\includegraphics[scale=0.6]{Figures/ModalNet-19}
|
67 |
-
\end{minipage}
|
68 |
-
\begin{minipage}[t]{0.5\textwidth}
|
69 |
-
\centering
|
70 |
-
Multi-Head Attention \\
|
71 |
-
\vspace{0.1cm}
|
72 |
-
\includegraphics[scale=0.6]{Figures/ModalNet-20}
|
73 |
-
\end{minipage}
|
74 |
-
|
75 |
-
|
76 |
-
% \centering
|
77 |
-
|
78 |
-
\caption{(left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several attention layers running in parallel.}
|
79 |
-
\label{fig:multi-head-att}
|
80 |
-
\end{figure}
|
81 |
-
|
82 |
-
Instead of performing a single attention function with $\dmodel$-dimensional keys, values and queries, we found it beneficial to linearly project the queries, keys and values $h$ times with different, learned linear projections to $d_k$, $d_k$ and $d_v$ dimensions, respectively.
|
83 |
-
On each of these projected versions of queries, keys and values we then perform the attention function in parallel, yielding $d_v$-dimensional output values. These are concatenated and once again projected, resulting in the final values, as depicted in Figure~\ref{fig:multi-head-att}.
|
84 |
-
|
85 |
-
Multi-head attention allows the model to jointly attend to information from different representation subspaces at different positions. With a single attention head, averaging inhibits this.
|
86 |
-
|
87 |
-
\begin{align*}
|
88 |
-
\mathrm{MultiHead}(Q, K, V) &= \mathrm{Concat}(\mathrm{head_1}, ..., \mathrm{head_h})W^O\\
|
89 |
-
% \mathrm{where} \mathrm{head_i} &= \mathrm{Attention}(QW_Q_i^{\dmodel \times d_q}, KW_K_i^{\dmodel \times d_k}, VW^V_i^{\dmodel \times d_v})\\
|
90 |
-
\text{where}~\mathrm{head_i} &= \mathrm{Attention}(QW^Q_i, KW^K_i, VW^V_i)\\
|
91 |
-
\end{align*}
|
92 |
-
|
93 |
-
Where the projections are parameter matrices $W^Q_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^K_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^V_i \in \mathbb{R}^{\dmodel \times d_v}$ and $W^O \in \mathbb{R}^{hd_v \times \dmodel}$.
|
94 |
-
|
95 |
-
|
96 |
-
%find it better (and no more expensive) to have multiple parallel attention layers (each over the full set of positions) with proportionally lower-dimensional keys, values and queries. We call this "Multi-Head Attention" (Figure~\ref{fig:multi-head-att}). The keys, values, and queries for each of these parallel attention layers are computed by learned linear transformations of the inputs to the multi-head attention. We use different linear transformations across different parallel attention layers. The output of the parallel attention layers are concatenated, and then passed through a final learned linear transformation.
|
97 |
-
|
98 |
-
In this work we employ $h=8$ parallel attention layers, or heads. For each of these we use $d_k=d_v=\dmodel/h=64$.
|
99 |
-
Due to the reduced dimension of each head, the total computational cost is similar to that of single-head attention with full dimensionality.
|
100 |
-
|
101 |
-
\subsubsection{Applications of Attention in our Model}
|
102 |
-
|
103 |
-
The Transformer uses multi-head attention in three different ways:
|
104 |
-
\begin{itemize}
|
105 |
-
\item In "encoder-decoder attention" layers, the queries come from the previous decoder layer, and the memory keys and values come from the output of the encoder. This allows every position in the decoder to attend over all positions in the input sequence. This mimics the typical encoder-decoder attention mechanisms in sequence-to-sequence models such as \citep{wu2016google, bahdanau2014neural,JonasFaceNet2017}.
|
106 |
-
|
107 |
-
\item The encoder contains self-attention layers. In a self-attention layer all of the keys, values and queries come from the same place, in this case, the output of the previous layer in the encoder. Each position in the encoder can attend to all positions in the previous layer of the encoder.
|
108 |
-
|
109 |
-
\item Similarly, self-attention layers in the decoder allow each position in the decoder to attend to all positions in the decoder up to and including that position. We need to prevent leftward information flow in the decoder to preserve the auto-regressive property. We implement this inside of scaled dot-product attention by masking out (setting to $-\infty$) all values in the input of the softmax which correspond to illegal connections. See Figure~\ref{fig:multi-head-att}.
|
110 |
-
|
111 |
-
\end{itemize}
|
112 |
-
|
113 |
-
\subsection{Position-wise Feed-Forward Networks}\label{sec:ffn}
|
114 |
-
|
115 |
-
In addition to attention sub-layers, each of the layers in our encoder and decoder contains a fully connected feed-forward network, which is applied to each position separately and identically. This consists of two linear transformations with a ReLU activation in between.
|
116 |
-
|
117 |
-
\begin{equation}
|
118 |
-
\mathrm{FFN}(x)=\max(0, xW_1 + b_1) W_2 + b_2
|
119 |
-
\end{equation}
|
120 |
-
|
121 |
-
While the linear transformations are the same across different positions, they use different parameters from layer to layer. Another way of describing this is as two convolutions with kernel size 1. The dimensionality of input and output is $\dmodel=512$, and the inner-layer has dimensionality $d_{ff}=2048$.
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
%In the appendix, we describe how the position-wise feed-forward network can also be seen as a form of attention.
|
126 |
-
|
127 |
-
%from Jakob: The number of operations required for the model to relate signals from two arbitrary input or output positions grows in the distance between positions in input or output, linearly for ConvS2S and logarithmically for ByteNet, making it harder to learn dependencies between these positions \citep{hochreiter2001gradient}. In the transformer this is reduced to a constant number of operations, albeit at the cost of effective resolution caused by averaging attention-weighted positions, an effect we aim to counteract with multi-headed attention.
|
128 |
-
|
129 |
-
|
130 |
-
%Figure~\ref{fig:simple-att} presents a simple attention function, $A$, with a single head, that forms the basis of our multi-head attention. $A$ takes a query key vector $\kq$, matrices of memory keys $\km$ and memory values $\vm$ ,and produces a query value vector $\vq$ as
|
131 |
-
%\begin{equation*} \label{eq:attention}
|
132 |
-
% A(\kq, \km, \vm) = {\vm}^T (Softmax(\km \kq).
|
133 |
-
%\end{equation*}
|
134 |
-
%We linearly transform $\kq,\,\km$, and $\vm$ with learned matrices ${\Wkq \text{,} \, \Wkm}$, and ${\Wvm}$ before calling the attention function, and transform the output query with $\Wvq$ before handing it to the feed forward layer. Each attention layer has it's own set of transformation matrices, which are shared across all query positions. $A$ is applied in parallel for each query position, and is implemented very efficiently as a batch of matrix multiplies. The self-attention and encoder-decoder attention layers use $A$, but with different arguments. For example, in encdoder self-attention, queries in encoder layer $i$ attention to memories in encoder layer $i-1$. To ensure that decoder self-attention layers do not look at future words, we add $- \inf$ to the softmax logits in positions $j+1$ to query length for query position $l$.
|
135 |
-
|
136 |
-
%In simple attention, the query value is a weighted combination of the memory values where the attention weights sum to one. Although this function performs well in practice, the constraint on attention weights can restrict the amount of information that flows from memories to queries because the query cannot focus on multiple memory positions at once, which might be desirable when translating long sequences. \marginpar{@usz, could you think of an example of this ?} We remedy this by maintaining multiple attention heads at each query position that attend to all memory positions in parallel, with a different set of parameters per attention head $h$.
|
137 |
-
%\marginpar{}
|
138 |
-
|
139 |
-
\subsection{Embeddings and Softmax}
|
140 |
-
Similarly to other sequence transduction models, we use learned embeddings to convert the input tokens and output tokens to vectors of dimension $\dmodel$. We also use the usual learned linear transformation and softmax function to convert the decoder output to predicted next-token probabilities. In our model, we share the same weight matrix between the two embedding layers and the pre-softmax linear transformation, similar to \citep{press2016using}. In the embedding layers, we multiply those weights by $\sqrt{\dmodel}$.
|
141 |
-
|
142 |
-
|
143 |
-
\subsection{Positional Encoding}
|
144 |
-
Since our model contains no recurrence and no convolution, in order for the model to make use of the order of the sequence, we must inject some information about the relative or absolute position of the tokens in the sequence. To this end, we add "positional encodings" to the input embeddings at the bottoms of the encoder and decoder stacks. The positional encodings have the same dimension $\dmodel$ as the embeddings, so that the two can be summed. There are many choices of positional encodings, learned and fixed \citep{JonasFaceNet2017}.
|
145 |
-
|
146 |
-
In this work, we use sine and cosine functions of different frequencies:
|
147 |
-
|
148 |
-
\begin{align*}
|
149 |
-
PE_{(pos,2i)} = sin(pos / 10000^{2i/\dmodel}) \\
|
150 |
-
PE_{(pos,2i+1)} = cos(pos / 10000^{2i/\dmodel})
|
151 |
-
\end{align*}
|
152 |
-
|
153 |
-
where $pos$ is the position and $i$ is the dimension. That is, each dimension of the positional encoding corresponds to a sinusoid. The wavelengths form a geometric progression from $2\pi$ to $10000 \cdot 2\pi$. We chose this function because we hypothesized it would allow the model to easily learn to attend by relative positions, since for any fixed offset $k$, $PE_{pos+k}$ can be represented as a linear function of $PE_{pos}$.
|
154 |
-
|
155 |
-
We also experimented with using learned positional embeddings \citep{JonasFaceNet2017} instead, and found that the two versions produced nearly identical results (see Table~\ref{tab:variations} row (E)). We chose the sinusoidal version because it may allow the model to extrapolate to sequence lengths longer than the ones encountered during training.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/fcos/loss.py
DELETED
@@ -1,194 +0,0 @@
|
|
1 |
-
"""
This file contains specific functions for computing the losses of FCOS.
"""
|
5 |
-
|
6 |
-
import torch
|
7 |
-
from torch.nn import functional as F
|
8 |
-
from torch import nn
|
9 |
-
|
10 |
-
from ..utils import concat_box_prediction_layers
|
11 |
-
from maskrcnn_benchmark.layers import IOULoss
|
12 |
-
from maskrcnn_benchmark.layers import SigmoidFocalLoss
|
13 |
-
from maskrcnn_benchmark.modeling.matcher import Matcher
|
14 |
-
from maskrcnn_benchmark.modeling.utils import cat
|
15 |
-
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
|
16 |
-
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
|
17 |
-
|
18 |
-
|
19 |
-
INF = 100000000
|
20 |
-
|
21 |
-
|
22 |
-
class FCOSLossComputation(object):
    """
    This class computes the FCOS losses: classification (focal loss),
    box regression (IoU loss weighted by centerness) and centerness (BCE),
    given per-level feature-map locations and ground-truth boxes.
    """

    def __init__(self, cfg):
        # Focal loss for the classification branch, parameters taken from
        # the FCOS section of the config.
        self.cls_loss_func = SigmoidFocalLoss(
            cfg.MODEL.FCOS.LOSS_GAMMA,
            cfg.MODEL.FCOS.LOSS_ALPHA
        )
        # we make use of IOU Loss for bounding boxes regression,
        # but we found that L1 in log scale can yield a similar performance
        self.box_reg_loss_func = IOULoss()
        self.centerness_loss_func = nn.BCEWithLogitsLoss()
        # generate sizes of interest: consecutive [lower, upper] regression
        # ranges, one per pyramid level, ending with [last, INF].
        soi = []
        prev_size = -1
        for s in cfg.MODEL.FCOS.SIZES_OF_INTEREST:
            soi.append([prev_size, s])
            prev_size = s
        soi.append([prev_size, INF])
        self.object_sizes_of_interest = soi

    def prepare_targets(self, points, targets):
        """Match every location to a ground-truth box (or background).

        Args:
            points: list of per-level location tensors.
            targets: list of per-image ground-truth BoxLists.

        Returns:
            Two lists (labels, regression targets), indexed by level, with
            all images concatenated within each level.
        """
        object_sizes_of_interest = self.object_sizes_of_interest
        expanded_object_sizes_of_interest = []
        for l, points_per_level in enumerate(points):
            # Broadcast this level's [min, max] regression range to every
            # location of the level.
            object_sizes_of_interest_per_level = \
                points_per_level.new_tensor(object_sizes_of_interest[l])
            expanded_object_sizes_of_interest.append(
                object_sizes_of_interest_per_level[None].expand(len(points_per_level), -1)
            )

        expanded_object_sizes_of_interest = torch.cat(expanded_object_sizes_of_interest, dim=0)
        num_points_per_level = [len(points_per_level) for points_per_level in points]
        points_all_level = torch.cat(points, dim=0)
        labels, reg_targets = self.compute_targets_for_locations(
            points_all_level, targets, expanded_object_sizes_of_interest
        )

        # Split each image's flat result back into per-level chunks.
        for i in range(len(labels)):
            labels[i] = torch.split(labels[i], num_points_per_level, dim=0)
            reg_targets[i] = torch.split(reg_targets[i], num_points_per_level, dim=0)

        # Transpose the nesting from image-first to level-first.
        labels_level_first = []
        reg_targets_level_first = []
        for level in range(len(points)):
            labels_level_first.append(
                torch.cat([labels_per_im[level] for labels_per_im in labels], dim=0)
            )
            reg_targets_level_first.append(
                torch.cat([reg_targets_per_im[level] for reg_targets_per_im in reg_targets], dim=0)
            )

        return labels_level_first, reg_targets_level_first

    def compute_targets_for_locations(self, locations, targets, object_sizes_of_interest):
        """For each image, assign every location a class label and an
        (l, t, r, b) regression target, using the minimal-area rule to
        resolve locations covered by several boxes."""
        labels = []
        reg_targets = []
        xs, ys = locations[:, 0], locations[:, 1]

        for im_i in range(len(targets)):
            targets_per_im = targets[im_i]
            assert targets_per_im.mode == "xyxy"
            bboxes = targets_per_im.bbox
            labels_per_im = targets_per_im.get_field("labels")
            area = targets_per_im.area()

            # Signed distances from each location to each box's four sides;
            # all four positive <=> the location lies inside the box.
            l = xs[:, None] - bboxes[:, 0][None]
            t = ys[:, None] - bboxes[:, 1][None]
            r = bboxes[:, 2][None] - xs[:, None]
            b = bboxes[:, 3][None] - ys[:, None]
            reg_targets_per_im = torch.stack([l, t, r, b], dim=2)

            is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0

            max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0]
            # limit the regression range for each location
            is_cared_in_the_level = \
                (max_reg_targets_per_im >= object_sizes_of_interest[:, [0]]) & \
                (max_reg_targets_per_im <= object_sizes_of_interest[:, [1]])

            # (num_locations, num_gt) matrix of candidate areas; invalid
            # pairs are pushed to INF so min() below never selects them.
            locations_to_gt_area = area[None].repeat(len(locations), 1)
            locations_to_gt_area[is_in_boxes == 0] = INF
            locations_to_gt_area[is_cared_in_the_level == 0] = INF

            # if there are still more than one objects for a location,
            # we choose the one with minimal area
            # NOTE(review): "aera" is a typo for "area"; kept as-is here.
            locations_to_min_aera, locations_to_gt_inds = locations_to_gt_area.min(dim=1)

            reg_targets_per_im = reg_targets_per_im[range(len(locations)), locations_to_gt_inds]
            labels_per_im = labels_per_im[locations_to_gt_inds]
            # Locations matched to no valid box become background (label 0).
            labels_per_im[locations_to_min_aera == INF] = 0

            labels.append(labels_per_im)
            reg_targets.append(reg_targets_per_im)

        return labels, reg_targets

    def compute_centerness_targets(self, reg_targets):
        """Centerness = sqrt((min(l,r)/max(l,r)) * (min(t,b)/max(t,b))),
        i.e. 1 at a box center, approaching 0 near its edges."""
        left_right = reg_targets[:, [0, 2]]
        top_bottom = reg_targets[:, [1, 3]]
        centerness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \
                      (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
        return torch.sqrt(centerness)

    def __call__(self, locations, box_cls, box_regression, centerness, targets):
        """
        Arguments:
            locations (list[BoxList])
            box_cls (list[Tensor])
            box_regression (list[Tensor])
            centerness (list[Tensor])
            targets (list[BoxList])

        Returns:
            cls_loss (Tensor)
            reg_loss (Tensor)
            centerness_loss (Tensor)
        """
        N = box_cls[0].size(0)
        num_classes = box_cls[0].size(1)
        labels, reg_targets = self.prepare_targets(locations, targets)

        # Flatten every level's (N, C, H, W) predictions into per-location
        # rows so the losses can be computed over one big batch.
        box_cls_flatten = []
        box_regression_flatten = []
        centerness_flatten = []
        labels_flatten = []
        reg_targets_flatten = []
        for l in range(len(labels)):
            box_cls_flatten.append(box_cls[l].permute(0, 2, 3, 1).reshape(-1, num_classes))
            box_regression_flatten.append(box_regression[l].permute(0, 2, 3, 1).reshape(-1, 4))
            labels_flatten.append(labels[l].reshape(-1))
            reg_targets_flatten.append(reg_targets[l].reshape(-1, 4))
            centerness_flatten.append(centerness[l].reshape(-1))

        box_cls_flatten = torch.cat(box_cls_flatten, dim=0)
        box_regression_flatten = torch.cat(box_regression_flatten, dim=0)
        centerness_flatten = torch.cat(centerness_flatten, dim=0)
        labels_flatten = torch.cat(labels_flatten, dim=0)
        reg_targets_flatten = torch.cat(reg_targets_flatten, dim=0)

        # Positive samples are all foreground locations (label > 0).
        pos_inds = torch.nonzero(labels_flatten > 0).squeeze(1)
        cls_loss = self.cls_loss_func(
            box_cls_flatten,
            labels_flatten.int()
        ) / (pos_inds.numel() + N)  # add N to avoid dividing by a zero

        # Regression and centerness losses only use positive locations.
        box_regression_flatten = box_regression_flatten[pos_inds]
        reg_targets_flatten = reg_targets_flatten[pos_inds]
        centerness_flatten = centerness_flatten[pos_inds]

        if pos_inds.numel() > 0:
            centerness_targets = self.compute_centerness_targets(reg_targets_flatten)
            reg_loss = self.box_reg_loss_func(
                box_regression_flatten,
                reg_targets_flatten,
                centerness_targets
            )
            centerness_loss = self.centerness_loss_func(
                centerness_flatten,
                centerness_targets
            )
        else:
            # No positives: zero-valued losses that keep the graph connected.
            reg_loss = box_regression_flatten.sum()
            centerness_loss = centerness_flatten.sum()

        return cls_loss, reg_loss, centerness_loss
|
190 |
-
|
191 |
-
|
192 |
-
def make_fcos_loss_evaluator(cfg):
    """Build and return the FCOS loss evaluator for the given config."""
    return FCOSLossComputation(cfg)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DEVILOVER/image_captioning/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
# Minimal Gradio Space: expose the hosted ViT-GPT2 image-captioning model
# from the Hugging Face Hub as a web demo.
import gradio as gr

# NOTE(review): Interface.load was deprecated in newer Gradio versions in
# favor of gr.load — confirm against the pinned gradio version.
gr.Interface.load("models/nlpconnect/vit-gpt2-image-captioning").launch()
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/http_writer.py
DELETED
@@ -1,198 +0,0 @@
|
|
1 |
-
"""Http related parsers and protocol."""
|
2 |
-
|
3 |
-
import asyncio
|
4 |
-
import zlib
|
5 |
-
from typing import Any, Awaitable, Callable, NamedTuple, Optional, Union # noqa
|
6 |
-
|
7 |
-
from multidict import CIMultiDict
|
8 |
-
|
9 |
-
from .abc import AbstractStreamWriter
|
10 |
-
from .base_protocol import BaseProtocol
|
11 |
-
from .helpers import NO_EXTENSIONS
|
12 |
-
|
13 |
-
__all__ = ("StreamWriter", "HttpVersion", "HttpVersion10", "HttpVersion11")
|
14 |
-
|
15 |
-
|
16 |
-
class HttpVersion(NamedTuple):
    """HTTP protocol version as an immutable (major, minor) pair."""

    # Major version number (the first "1" in HTTP/1.1).
    major: int
    # Minor version number (the second "1" in HTTP/1.1).
    minor: int
|
19 |
-
|
20 |
-
|
21 |
-
# Shared singleton version constants.
HttpVersion10 = HttpVersion(1, 0)
HttpVersion11 = HttpVersion(1, 1)


# Optional async callbacks awaited with each body chunk / header mapping
# before it is written to the transport (see StreamWriter.write /
# StreamWriter.write_headers).
_T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]]
_T_OnHeadersSent = Optional[Callable[["CIMultiDict[str]"], Awaitable[None]]]
|
27 |
-
|
28 |
-
|
29 |
-
class StreamWriter(AbstractStreamWriter):
    """Write an HTTP/1.x message body to the underlying transport.

    Supports optional chunked transfer encoding, optional zlib/gzip
    compression, and a Content-Length budget (``self.length``).
    """

    def __init__(
        self,
        protocol: BaseProtocol,
        loop: asyncio.AbstractEventLoop,
        on_chunk_sent: _T_OnChunkSent = None,
        on_headers_sent: _T_OnHeadersSent = None,
    ) -> None:
        self._protocol = protocol

        self.loop = loop
        # Remaining Content-Length budget; None means unlimited.
        self.length = None
        # True once chunked transfer encoding is enabled.
        self.chunked = False
        # Bytes written since the last drain(), and total bytes written.
        self.buffer_size = 0
        self.output_size = 0

        self._eof = False
        # zlib compressor, set by enable_compression().
        self._compress: Any = None
        self._drain_waiter = None

        # Optional callbacks awaited before data is handed to the transport.
        self._on_chunk_sent: _T_OnChunkSent = on_chunk_sent
        self._on_headers_sent: _T_OnHeadersSent = on_headers_sent

    @property
    def transport(self) -> Optional[asyncio.Transport]:
        # Delegate to the protocol; the transport may already be gone.
        return self._protocol.transport

    @property
    def protocol(self) -> BaseProtocol:
        return self._protocol

    def enable_chunking(self) -> None:
        """Switch the writer to chunked transfer encoding."""
        self.chunked = True

    def enable_compression(
        self, encoding: str = "deflate", strategy: int = zlib.Z_DEFAULT_STRATEGY
    ) -> None:
        """Enable on-the-fly body compression ("deflate" or "gzip")."""
        # wbits = 16 + MAX_WBITS selects a gzip wrapper in zlib;
        # plain MAX_WBITS produces a zlib (deflate) stream.
        zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else zlib.MAX_WBITS
        self._compress = zlib.compressobj(wbits=zlib_mode, strategy=strategy)

    def _write(self, chunk: bytes) -> None:
        # Low-level write: account for the bytes, then push them to the
        # transport, failing loudly if the connection is closing.
        size = len(chunk)
        self.buffer_size += size
        self.output_size += size
        transport = self.transport
        if not self._protocol.connected or transport is None or transport.is_closing():
            raise ConnectionResetError("Cannot write to closing transport")
        transport.write(chunk)

    async def write(
        self, chunk: bytes, *, drain: bool = True, LIMIT: int = 0x10000
    ) -> None:
        """Writes chunk of data to a stream.

        write_eof() indicates end of stream.
        writer can't be used after write_eof() method being called.
        write() return drain future.
        """
        if self._on_chunk_sent is not None:
            await self._on_chunk_sent(chunk)

        if isinstance(chunk, memoryview):
            if chunk.nbytes != len(chunk):
                # just reshape it
                chunk = chunk.cast("c")

        if self._compress is not None:
            chunk = self._compress.compress(chunk)
            if not chunk:
                # Compressor buffered everything; nothing to send yet.
                return

        if self.length is not None:
            chunk_len = len(chunk)
            if self.length >= chunk_len:
                self.length = self.length - chunk_len
            else:
                # Truncate to the declared Content-Length.
                chunk = chunk[: self.length]
                self.length = 0
                if not chunk:
                    return

        if chunk:
            if self.chunked:
                # Prefix the chunk with its hex size per chunked encoding.
                chunk_len_pre = ("%x\r\n" % len(chunk)).encode("ascii")
                chunk = chunk_len_pre + chunk + b"\r\n"

            self._write(chunk)

            if self.buffer_size > LIMIT and drain:
                self.buffer_size = 0
                await self.drain()

    async def write_headers(
        self, status_line: str, headers: "CIMultiDict[str]"
    ) -> None:
        """Write request/response status and headers."""
        if self._on_headers_sent is not None:
            await self._on_headers_sent(headers)

        # status + headers
        buf = _serialize_headers(status_line, headers)
        self._write(buf)

    async def write_eof(self, chunk: bytes = b"") -> None:
        """Finish the message: flush compression and emit the final chunk."""
        if self._eof:
            # write_eof() is idempotent.
            return

        if chunk and self._on_chunk_sent is not None:
            await self._on_chunk_sent(chunk)

        if self._compress:
            if chunk:
                chunk = self._compress.compress(chunk)

            # Append whatever the compressor still holds.
            chunk = chunk + self._compress.flush()
            if chunk and self.chunked:
                chunk_len = ("%x\r\n" % len(chunk)).encode("ascii")
                chunk = chunk_len + chunk + b"\r\n0\r\n\r\n"
        else:
            if self.chunked:
                if chunk:
                    chunk_len = ("%x\r\n" % len(chunk)).encode("ascii")
                    chunk = chunk_len + chunk + b"\r\n0\r\n\r\n"
                else:
                    # Zero-length terminating chunk only.
                    chunk = b"0\r\n\r\n"

        if chunk:
            self._write(chunk)

        await self.drain()

        self._eof = True

    async def drain(self) -> None:
        """Flush the write buffer.

        The intended use is to write

          await w.write(data)
          await w.drain()
        """
        if self._protocol.transport is not None:
            await self._protocol._drain_helper()
|
172 |
-
|
173 |
-
|
174 |
-
def _safe_header(string: str) -> str:
|
175 |
-
if "\r" in string or "\n" in string:
|
176 |
-
raise ValueError(
|
177 |
-
"Newline or carriage return detected in headers. "
|
178 |
-
"Potential header injection attack."
|
179 |
-
)
|
180 |
-
return string
|
181 |
-
|
182 |
-
|
183 |
-
def _py_serialize_headers(status_line: str, headers: "CIMultiDict[str]") -> bytes:
    """Pure-Python serialization of a status line plus headers to bytes.

    Each header name and value is validated by _safe_header to block
    CR/LF injection; a blank line terminates the header block.
    """
    serialized = [
        _safe_header(name) + ": " + _safe_header(value)
        for name, value in headers.items()
    ]
    blob = status_line + "\r\n" + "\r\n".join(serialized) + "\r\n\r\n"
    return blob.encode("utf-8")
|
187 |
-
|
188 |
-
|
189 |
-
# Default to the pure-Python serializer; the C-accelerated version from the
# optional extension module replaces it below when available.
_serialize_headers = _py_serialize_headers

try:
    import aiohttp._http_writer as _http_writer  # type: ignore[import]

    _c_serialize_headers = _http_writer._serialize_headers
    if not NO_EXTENSIONS:
        _serialize_headers = _c_serialize_headers
except ImportError:
    # C accelerator not built on this platform; keep the Python fallback.
    pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/S__i_l_l.py
DELETED
@@ -1,87 +0,0 @@
|
|
1 |
-
from fontTools.misc import sstruct
|
2 |
-
from fontTools.misc.fixedTools import floatToFixedToStr
|
3 |
-
from fontTools.misc.textTools import safeEval
|
4 |
-
from . import DefaultTable
|
5 |
-
from . import grUtils
|
6 |
-
import struct
|
7 |
-
|
8 |
-
Sill_hdr = """
|
9 |
-
>
|
10 |
-
version: 16.16F
|
11 |
-
"""
|
12 |
-
|
13 |
-
|
14 |
-
class table_S__i_l_l(DefaultTable.DefaultTable):
    """Graphite 'Sill' table: per-language default feature settings.

    ``self.langs`` maps a language code string to a list of
    (feature id, value) pairs.
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # language code -> list of (feature id, value) pairs
        self.langs = {}

    def decompile(self, data, ttFont):
        """Parse the binary table into ``self.langs``."""
        # Header: a single 16.16 fixed-point version field.
        (_, data) = sstruct.unpack2(Sill_hdr, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        (numLangs,) = struct.unpack(">H", data[:2])
        # NOTE(review): skips 8 bytes (numLangs plus search-range fields) —
        # confirm against the Graphite Sill spec.
        data = data[8:]
        maxsetting = 0
        langinfo = []
        for i in range(numLangs):
            # Each language record: 4-byte code, setting count, byte offset.
            (langcode, numsettings, offset) = struct.unpack(
                ">4sHH", data[i * 8 : (i + 1) * 8]
            )
            # Convert the byte offset into an index into the settings array.
            offset = int(offset / 8) - (numLangs + 1)
            langcode = langcode.replace(b"\000", b"")
            langinfo.append((langcode.decode("utf-8"), numsettings, offset))
            maxsetting = max(maxsetting, offset + numsettings)
        data = data[numLangs * 8 :]
        # Read the flat array of (feature id, value) settings.
        finfo = []
        for i in range(maxsetting):
            (fid, val, _) = struct.unpack(">LHH", data[i * 8 : (i + 1) * 8])
            finfo.append((fid, val))
        self.langs = {}
        for c, n, o in langinfo:
            self.langs[c] = []
            for i in range(o, o + n):
                self.langs[c].append(finfo[i])

    def compile(self, ttFont):
        """Serialize ``self.langs`` back to the binary table format."""
        ldat = b""  # language records
        fdat = b""  # flat feature-setting records
        offset = len(self.langs)
        for c, inf in sorted(self.langs.items()):
            ldat += struct.pack(">4sHH", c.encode("utf8"), len(inf), 8 * offset + 20)
            for fid, val in inf:
                fdat += struct.pack(">LHH", fid, val, 0)
            offset += len(inf)
        # Sentinel language record terminating the list.
        ldat += struct.pack(">LHH", 0x80808080, 0, 8 * offset + 20)
        return (
            sstruct.pack(Sill_hdr, self)
            + grUtils.bininfo(len(self.langs))
            + ldat
            + fdat
        )

    def toXML(self, writer, ttFont):
        """Dump the table as TTX XML."""
        writer.simpletag("version", version=self.version)
        writer.newline()
        for c, inf in sorted(self.langs.items()):
            writer.begintag("lang", name=c)
            writer.newline()
            for fid, val in inf:
                writer.simpletag("feature", fid=grUtils.num2tag(fid), val=val)
                writer.newline()
            writer.endtag("lang")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from TTX XML elements."""
        if name == "version":
            self.version = float(safeEval(attrs["version"]))
        elif name == "lang":
            c = attrs["name"]
            self.langs[c] = []
            for element in content:
                # Skip bare whitespace/text nodes between elements.
                if not isinstance(element, tuple):
                    continue
                tag, a, subcontent = element
                if tag == "feature":
                    self.langs[c].append(
                        (grUtils.tag2num(a["fid"]), int(safeEval(a["val"])))
                    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-9001a1ae.js
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
import{S as M,e as V,s as H,J as R,K as k,p as _,M as v,n as w,A as m,N as y,O as T,U as Z,u as B,v as b,y as C,z as d,P as h,R as A,m as L,G as E,V as Y,Q as q,k as O,o as N,x as S,ai as W,Z as X,$ as x,B as ee,E as te,ae as le,q as ne,r as ie}from"./index-1d65707a.js";import{f as se,B as oe}from"./Button-f155035a.js";import{C as re,a as ce}from"./Copy-9f1657c4.js";import{E as ae}from"./Empty-eec13822.js";import{B as fe}from"./BlockLabel-66866176.js";import"./Blocks-c9e1499d.js";function ue(a){let e,t;return{c(){e=R("svg"),t=R("path"),k(t,"fill","currentColor"),k(t,"d","M5 3h2v2H5v5a2 2 0 0 1-2 2a2 2 0 0 1 2 2v5h2v2H5c-1.07-.27-2-.9-2-2v-4a2 2 0 0 0-2-2H0v-2h1a2 2 0 0 0 2-2V5a2 2 0 0 1 2-2m14 0a2 2 0 0 1 2 2v4a2 2 0 0 0 2 2h1v2h-1a2 2 0 0 0-2 2v4a2 2 0 0 1-2 2h-2v-2h2v-5a2 2 0 0 1 2-2a2 2 0 0 1-2-2V5h-2V3h2m-7 12a1 1 0 0 1 1 1a1 1 0 0 1-1 1a1 1 0 0 1-1-1a1 1 0 0 1 1-1m-4 0a1 1 0 0 1 1 1a1 1 0 0 1-1 1a1 1 0 0 1-1-1a1 1 0 0 1 1-1m8 0a1 1 0 0 1 1 1a1 1 0 0 1-1 1a1 1 0 0 1-1-1a1 1 0 0 1 1-1Z"),k(e,"xmlns","http://www.w3.org/2000/svg"),k(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),k(e,"aria-hidden","true"),k(e,"role","img"),k(e,"class","iconify iconify--mdi"),k(e,"width","100%"),k(e,"height","100%"),k(e,"preserveAspectRatio","xMidYMid meet"),k(e,"viewBox","0 0 24 24")},m(l,i){_(l,e,i),v(e,t)},p:w,i:w,o:w,d(l){l&&m(e)}}}let F=class extends M{constructor(e){super(),V(this,e,null,ue,H,{})}};function $(a,e,t){const l=a.slice();return l[5]=e[t],l[7]=t,l}function z(a,e,t){const l=a.slice();return l[5]=e[t],l[7]=t,l}function _e(a){let e,t;return{c(){e=y("div"),t=h(a[1]),k(e,"class","json-item svelte-1kspdo")},m(l,i){_(l,e,i),v(e,t)},p(l,i){i&2&&A(t,l[1])},i:w,o:w,d(l){l&&m(e)}}}function me(a){let e,t;return{c(){e=y("div"),t=h(a[1]),k(e,"class","json-item number svelte-1kspdo")},m(l,i){_(l,e,i),v(e,t)},p(l,i){i&2&&A(t,l[1])},i:w,o:w,d(l){l&&m(e)}}}function de(a){let e,t=a[1].toLocaleString()+"",l;return{c(){e=y("div"),l=h(t),k(e,"class","json-item bool 
svelte-1kspdo")},m(i,r){_(i,e,r),v(e,l)},p(i,r){r&2&&t!==(t=i[1].toLocaleString()+"")&&A(l,t)},i:w,o:w,d(i){i&&m(e)}}}function be(a){let e,t,l,i;return{c(){e=y("div"),t=h('"'),l=h(a[1]),i=h('"'),k(e,"class","json-item string svelte-1kspdo")},m(r,o){_(r,e,o),v(e,t),v(e,l),v(e,i)},p(r,o){o&2&&A(l,r[1])},i:w,o:w,d(r){r&&m(e)}}}function pe(a){let e;return{c(){e=y("div"),e.textContent="null",k(e,"class","json-item null svelte-1kspdo")},m(t,l){_(t,e,l)},p:w,i:w,o:w,d(t){t&&m(e)}}}function ke(a){let e,t,l,i;const r=[ge,ve],o=[];function f(n,s){return n[0]?0:1}return e=f(a),t=o[e]=r[e](a),{c(){t.c(),l=L()},m(n,s){o[e].m(n,s),_(n,l,s),i=!0},p(n,s){let c=e;e=f(n),e===c?o[e].p(n,s):(B(),b(o[c],1,1,()=>{o[c]=null}),C(),t=o[e],t?t.p(n,s):(t=o[e]=r[e](n),t.c()),d(t,1),t.m(l.parentNode,l))},i(n){i||(d(t),i=!0)},o(n){b(t),i=!1},d(n){n&&m(l),o[e].d(n)}}}function he(a){let e,t,l,i;const r=[ye,we],o=[];function f(n,s){return n[0]?0:1}return e=f(a),t=o[e]=r[e](a),{c(){t.c(),l=L()},m(n,s){o[e].m(n,s),_(n,l,s),i=!0},p(n,s){let c=e;e=f(n),e===c?o[e].p(n,s):(B(),b(o[c],1,1,()=>{o[c]=null}),C(),t=o[e],t?t.p(n,s):(t=o[e]=r[e](n),t.c()),d(t,1),t.m(l.parentNode,l))},i(n){i||(d(t),i=!0)},o(n){b(t),i=!1},d(n){n&&m(l),o[e].d(n)}}}function ve(a){let e,t,l,i,r=E(Object.entries(a[1])),o=[];for(let n=0;n<r.length;n+=1)o[n]=I($(a,r,n));const f=n=>b(o[n],1,1,()=>{o[n]=null});return{c(){e=h(`{
|
2 |
-
`),t=y("div");for(let n=0;n<o.length;n+=1)o[n].c();l=h(`
|
3 |
-
}`),k(t,"class","children svelte-1kspdo")},m(n,s){_(n,e,s),_(n,t,s);for(let c=0;c<o.length;c+=1)o[c]&&o[c].m(t,null);_(n,l,s),i=!0},p(n,s){if(s&6){r=E(Object.entries(n[1]));let c;for(c=0;c<r.length;c+=1){const u=$(n,r,c);o[c]?(o[c].p(u,s),d(o[c],1)):(o[c]=I(u),o[c].c(),d(o[c],1),o[c].m(t,null))}for(B(),c=r.length;c<o.length;c+=1)f(c);C()}},i(n){if(!i){for(let s=0;s<r.length;s+=1)d(o[s]);i=!0}},o(n){o=o.filter(Boolean);for(let s=0;s<o.length;s+=1)b(o[s]);i=!1},d(n){n&&(m(e),m(t),m(l)),Y(o,n)}}}function ge(a){let e,t,l=Object.keys(a[1]).length+"",i,r,o,f;return{c(){e=y("button"),t=h("{+"),i=h(l),r=h(" items}")},m(n,s){_(n,e,s),v(e,t),v(e,i),v(e,r),o||(f=q(e,"click",a[4]),o=!0)},p(n,s){s&2&&l!==(l=Object.keys(n[1]).length+"")&&A(i,l)},i:w,o:w,d(n){n&&m(e),o=!1,f()}}}function G(a){let e;return{c(){e=h(",")},m(t,l){_(t,e,l)},d(t){t&&m(e)}}}function I(a){let e,t=a[5][0]+"",l,i,r,o=a[7]!==Object.keys(a[1]).length-1,f,n;r=new D({props:{value:a[5][1],depth:a[2]+1,key:a[7]}});let s=o&&G();return{c(){e=y("div"),l=h(t),i=h(": "),O(r.$$.fragment),s&&s.c(),f=T()},m(c,u){_(c,e,u),v(e,l),v(e,i),N(r,e,null),s&&s.m(e,null),v(e,f),n=!0},p(c,u){(!n||u&2)&&t!==(t=c[5][0]+"")&&A(l,t);const j={};u&2&&(j.value=c[5][1]),u&4&&(j.depth=c[2]+1),r.$set(j),u&2&&(o=c[7]!==Object.keys(c[1]).length-1),o?s||(s=G(),s.c(),s.m(e,f)):s&&(s.d(1),s=null)},i(c){n||(d(r.$$.fragment,c),n=!0)},o(c){b(r.$$.fragment,c),n=!1},d(c){c&&m(e),S(r),s&&s.d()}}}function we(a){let e,t,l,i,r=E(a[1]),o=[];for(let n=0;n<r.length;n+=1)o[n]=Q(z(a,r,n));const f=n=>b(o[n],1,1,()=>{o[n]=null});return{c(){e=h(`[
|
4 |
-
`),t=y("div");for(let n=0;n<o.length;n+=1)o[n].c();l=h(`
|
5 |
-
]`),k(t,"class","children svelte-1kspdo")},m(n,s){_(n,e,s),_(n,t,s);for(let c=0;c<o.length;c+=1)o[c]&&o[c].m(t,null);_(n,l,s),i=!0},p(n,s){if(s&6){r=E(n[1]);let c;for(c=0;c<r.length;c+=1){const u=z(n,r,c);o[c]?(o[c].p(u,s),d(o[c],1)):(o[c]=Q(u),o[c].c(),d(o[c],1),o[c].m(t,null))}for(B(),c=r.length;c<o.length;c+=1)f(c);C()}},i(n){if(!i){for(let s=0;s<r.length;s+=1)d(o[s]);i=!0}},o(n){o=o.filter(Boolean);for(let s=0;s<o.length;s+=1)b(o[s]);i=!1},d(n){n&&(m(e),m(t),m(l)),Y(o,n)}}}function ye(a){let e,t,l,i=a[1].length+"",r,o,f,n;return{c(){e=y("button"),t=y("span"),l=h("expand "),r=h(i),o=h(" children"),k(t,"class","expand-array svelte-1kspdo")},m(s,c){_(s,e,c),v(e,t),v(t,l),v(t,r),v(t,o),f||(n=q(e,"click",a[3]),f=!0)},p(s,c){c&2&&i!==(i=s[1].length+"")&&A(r,i)},i:w,o:w,d(s){s&&m(e),f=!1,n()}}}function K(a){let e;return{c(){e=h(",")},m(t,l){_(t,e,l)},d(t){t&&m(e)}}}function Q(a){let e,t,l,i,r,o,f;i=new D({props:{value:a[5],depth:a[2]+1}});let n=a[7]!==a[1].length-1&&K();return{c(){e=y("div"),t=h(a[7]),l=h(": "),O(i.$$.fragment),r=T(),n&&n.c(),o=T()},m(s,c){_(s,e,c),v(e,t),v(e,l),N(i,e,null),v(e,r),n&&n.m(e,null),v(e,o),f=!0},p(s,c){const u={};c&2&&(u.value=s[5]),c&4&&(u.depth=s[2]+1),i.$set(u),s[7]!==s[1].length-1?n||(n=K(),n.c(),n.m(e,o)):n&&(n.d(1),n=null)},i(s){f||(d(i.$$.fragment,s),f=!0)},o(s){b(i.$$.fragment,s),f=!1},d(s){s&&m(e),S(i),n&&n.d()}}}function je(a){let e,t,l,i,r,o;const f=[he,ke,pe,be,de,me,_e],n=[];function s(c,u){return c[1]instanceof Array?0:c[1]instanceof Object?1:c[1]===null?2:typeof c[1]=="string"?3:typeof c[1]=="boolean"?4:typeof c[1]=="number"?5:6}return i=s(a),r=n[i]=f[i](a),{c(){e=y("span"),t=T(),l=y("div"),r.c(),k(e,"class","spacer svelte-1kspdo"),Z(e,"mt-10",a[2]===0),k(l,"class","json-node svelte-1kspdo")},m(c,u){_(c,e,u),_(c,t,u),_(c,l,u),n[i].m(l,null),o=!0},p(c,[u]){(!o||u&4)&&Z(e,"mt-10",c[2]===0);let 
j=i;i=s(c),i===j?n[i].p(c,u):(B(),b(n[j],1,1,()=>{n[j]=null}),C(),r=n[i],r?r.p(c,u):(r=n[i]=f[i](c),r.c()),d(r,1),r.m(l,null))},i(c){o||(d(r),o=!0)},o(c){b(r),o=!1},d(c){c&&(m(e),m(t),m(l)),n[i].d()}}}function Oe(a,e,t){let{value:l}=e,{depth:i}=e,{collapsed:r=i>4}=e;const o=()=>{t(0,r=!1)},f=()=>{t(0,r=!1)};return a.$$set=n=>{"value"in n&&t(1,l=n.value),"depth"in n&&t(2,i=n.depth),"collapsed"in n&&t(0,r=n.collapsed)},[r,l,i,o,f]}class D extends M{constructor(e){super(),V(this,e,Oe,je,H,{value:1,depth:2,collapsed:0})}}function Ne(a){let e,t;return e=new ae({props:{$$slots:{default:[Je]},$$scope:{ctx:a}}}),{c(){O(e.$$.fragment)},m(l,i){N(e,l,i),t=!0},p(l,i){const r={};i&32&&(r.$$scope={dirty:i,ctx:l}),e.$set(r)},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){S(e,l)}}}function Se(a){let e,t,l,i,r,o,f,n,s;const c=[Ce,Be],u=[];function j(g,J){return g[1]?0:1}return t=j(a),l=u[t]=c[t](a),o=new D({props:{value:a[0],depth:0}}),{c(){e=y("button"),l.c(),i=T(),r=y("div"),O(o.$$.fragment),k(e,"class","svelte-1trjy9a"),k(r,"class","json-holder svelte-1trjy9a")},m(g,J){_(g,e,J),u[t].m(e,null),_(g,i,J),_(g,r,J),N(o,r,null),f=!0,n||(s=q(e,"click",a[2]),n=!0)},p(g,J){let p=t;t=j(g),t!==p&&(B(),b(u[p],1,1,()=>{u[p]=null}),C(),l=u[t],l||(l=u[t]=c[t](g),l.c()),d(l,1),l.m(e,null));const P={};J&1&&(P.value=g[0]),o.$set(P)},i(g){f||(d(l),d(o.$$.fragment,g),f=!0)},o(g){b(l),b(o.$$.fragment,g),f=!1},d(g){g&&(m(e),m(i),m(r)),u[t].d(),S(o),n=!1,s()}}}function Je(a){let e,t;return e=new F({}),{c(){O(e.$$.fragment)},m(l,i){N(e,l,i),t=!0},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){S(e,l)}}}function Be(a){let e,t,l;return t=new re({}),{c(){e=y("span"),O(t.$$.fragment),k(e,"class","copy-text")},m(i,r){_(i,e,r),N(t,e,null),l=!0},i(i){l||(d(t.$$.fragment,i),l=!0)},o(i){b(t.$$.fragment,i),l=!1},d(i){i&&m(e),S(t)}}}function Ce(a){let e,t,l,i;return t=new 
ce({}),{c(){e=y("span"),O(t.$$.fragment)},m(r,o){_(r,e,o),N(t,e,null),i=!0},i(r){i||(d(t.$$.fragment,r),r&&(l||X(()=>{l=x(e,se,{duration:300}),l.start()})),i=!0)},o(r){b(t.$$.fragment,r),i=!1},d(r){r&&m(e),S(t)}}}function Te(a){let e,t,l,i,r;const o=[Se,Ne],f=[];function n(s,c){return c&1&&(e=null),e==null&&(e=!!(s[0]&&s[0]!=='""'&&!Ae(s[0]))),e?0:1}return t=n(a,-1),l=f[t]=o[t](a),{c(){l.c(),i=L()},m(s,c){f[t].m(s,c),_(s,i,c),r=!0},p(s,[c]){let u=t;t=n(s,c),t===u?f[t].p(s,c):(B(),b(f[u],1,1,()=>{f[u]=null}),C(),l=f[t],l?l.p(s,c):(l=f[t]=o[t](s),l.c()),d(l,1),l.m(i.parentNode,i))},i(s){r||(d(l),r=!0)},o(s){b(l),r=!1},d(s){s&&m(i),f[t].d(s)}}}function Ae(a){return a&&Object.keys(a).length===0&&Object.getPrototypeOf(a)===Object.prototype}function Ee(a,e,t){let{value:l={}}=e,i=!1,r;function o(){t(1,i=!0),r&&clearTimeout(r),r=setTimeout(()=>{t(1,i=!1)},1e3)}async function f(){"clipboard"in navigator&&(await navigator.clipboard.writeText(JSON.stringify(l,null,2)),o())}return W(()=>{r&&clearTimeout(r)}),a.$$set=n=>{"value"in n&&t(0,l=n.value)},[l,i,f]}class Me extends M{constructor(e){super(),V(this,e,Ee,Te,H,{value:0})}}function U(a){let e,t;return e=new fe({props:{Icon:F,show_label:a[6],label:a[5],float:!1,disable:a[7]===!1}}),{c(){O(e.$$.fragment)},m(l,i){N(e,l,i),t=!0},p(l,i){const r={};i&64&&(r.show_label=l[6]),i&32&&(r.label=l[5]),i&128&&(r.disable=l[7]===!1),e.$set(r)},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){S(e,l)}}}function Ve(a){let e,t,l,i,r,o=a[5]&&U(a);const f=[a[4]];let n={};for(let s=0;s<f.length;s+=1)n=te(n,f[s]);return t=new le({props:n}),i=new Me({props:{value:a[3]}}),{c(){o&&o.c(),e=T(),O(t.$$.fragment),l=T(),O(i.$$.fragment)},m(s,c){o&&o.m(s,c),_(s,e,c),N(t,s,c),_(s,l,c),N(i,s,c),r=!0},p(s,c){s[5]?o?(o.p(s,c),c&32&&d(o,1)):(o=U(s),o.c(),d(o,1),o.m(e.parentNode,e)):o&&(B(),b(o,1,1,()=>{o=null}),C());const u=c&16?ne(f,[ie(s[4])]):{};t.$set(u);const 
j={};c&8&&(j.value=s[3]),i.$set(j)},i(s){r||(d(o),d(t.$$.fragment,s),d(i.$$.fragment,s),r=!0)},o(s){b(o),b(t.$$.fragment,s),b(i.$$.fragment,s),r=!1},d(s){s&&(m(e),m(l)),o&&o.d(s),S(t,s),S(i,s)}}}function He(a){let e,t;return e=new oe({props:{visible:a[2],test_id:"json",elem_id:a[0],elem_classes:a[1],container:a[7],scale:a[8],min_width:a[9],padding:!1,$$slots:{default:[Ve]},$$scope:{ctx:a}}}),{c(){O(e.$$.fragment)},m(l,i){N(e,l,i),t=!0},p(l,[i]){const r={};i&4&&(r.visible=l[2]),i&1&&(r.elem_id=l[0]),i&2&&(r.elem_classes=l[1]),i&128&&(r.container=l[7]),i&256&&(r.scale=l[8]),i&512&&(r.min_width=l[9]),i&4344&&(r.$$scope={dirty:i,ctx:l}),e.$set(r)},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){S(e,l)}}}function Le(a,e,t){let{elem_id:l=""}=e,{elem_classes:i=[]}=e,{visible:r=!0}=e,{value:o}=e,f,{loading_status:n}=e,{label:s}=e,{show_label:c}=e,{container:u=!0}=e,{scale:j=null}=e,{min_width:g=void 0}=e;const J=ee();return a.$$set=p=>{"elem_id"in p&&t(0,l=p.elem_id),"elem_classes"in p&&t(1,i=p.elem_classes),"visible"in p&&t(2,r=p.visible),"value"in p&&t(3,o=p.value),"loading_status"in p&&t(4,n=p.loading_status),"label"in p&&t(5,s=p.label),"show_label"in p&&t(6,c=p.show_label),"container"in p&&t(7,u=p.container),"scale"in p&&t(8,j=p.scale),"min_width"in p&&t(9,g=p.min_width)},a.$$.update=()=>{a.$$.dirty&1032&&o!==f&&(t(10,f=o),J("change"))},[l,i,r,o,n,s,c,u,j,g,f]}class qe extends M{constructor(e){super(),V(this,e,Le,He,H,{elem_id:0,elem_classes:1,visible:2,value:3,loading_status:4,label:5,show_label:6,container:7,scale:8,min_width:9})}}const Ie=qe,Ke=["static"],Qe=a=>({type:{payload:"Object | Array"},description:{payload:"JSON object"}});export{Ie as Component,Qe as document,Ke as modes};
|
6 |
-
//# sourceMappingURL=index-9001a1ae.js.map
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_connection.py
DELETED
@@ -1,633 +0,0 @@
|
|
1 |
-
# This contains the main Connection class. Everything in h11 revolves around
|
2 |
-
# this.
|
3 |
-
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union
|
4 |
-
|
5 |
-
from ._events import (
|
6 |
-
ConnectionClosed,
|
7 |
-
Data,
|
8 |
-
EndOfMessage,
|
9 |
-
Event,
|
10 |
-
InformationalResponse,
|
11 |
-
Request,
|
12 |
-
Response,
|
13 |
-
)
|
14 |
-
from ._headers import get_comma_header, has_expect_100_continue, set_comma_header
|
15 |
-
from ._readers import READERS, ReadersType
|
16 |
-
from ._receivebuffer import ReceiveBuffer
|
17 |
-
from ._state import (
|
18 |
-
_SWITCH_CONNECT,
|
19 |
-
_SWITCH_UPGRADE,
|
20 |
-
CLIENT,
|
21 |
-
ConnectionState,
|
22 |
-
DONE,
|
23 |
-
ERROR,
|
24 |
-
MIGHT_SWITCH_PROTOCOL,
|
25 |
-
SEND_BODY,
|
26 |
-
SERVER,
|
27 |
-
SWITCHED_PROTOCOL,
|
28 |
-
)
|
29 |
-
from ._util import ( # Import the internal things we need
|
30 |
-
LocalProtocolError,
|
31 |
-
RemoteProtocolError,
|
32 |
-
Sentinel,
|
33 |
-
)
|
34 |
-
from ._writers import WRITERS, WritersType
|
35 |
-
|
36 |
-
# Everything in __all__ gets re-exported as part of the h11 public API.
|
37 |
-
__all__ = ["Connection", "NEED_DATA", "PAUSED"]
|
38 |
-
|
39 |
-
|
40 |
-
class NEED_DATA(Sentinel, metaclass=Sentinel):
|
41 |
-
pass
|
42 |
-
|
43 |
-
|
44 |
-
class PAUSED(Sentinel, metaclass=Sentinel):
|
45 |
-
pass
|
46 |
-
|
47 |
-
|
48 |
-
# If we ever have this much buffered without it making a complete parseable
|
49 |
-
# event, we error out. The only time we really buffer is when reading the
|
50 |
-
# request/response line + headers together, so this is effectively the limit on
|
51 |
-
# the size of that.
|
52 |
-
#
|
53 |
-
# Some precedents for defaults:
|
54 |
-
# - node.js: 80 * 1024
|
55 |
-
# - tomcat: 8 * 1024
|
56 |
-
# - IIS: 16 * 1024
|
57 |
-
# - Apache: <8 KiB per line>
|
58 |
-
DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024
|
59 |
-
|
60 |
-
# RFC 7230's rules for connection lifecycles:
|
61 |
-
# - If either side says they want to close the connection, then the connection
|
62 |
-
# must close.
|
63 |
-
# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close
|
64 |
-
# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive
|
65 |
-
# (and even this is a mess -- e.g. if you're implementing a proxy then
|
66 |
-
# sending Connection: keep-alive is forbidden).
|
67 |
-
#
|
68 |
-
# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So
|
69 |
-
# our rule is:
|
70 |
-
# - If someone says Connection: close, we will close
|
71 |
-
# - If someone uses HTTP/1.0, we will close.
|
72 |
-
def _keep_alive(event: Union[Request, Response]) -> bool:
|
73 |
-
connection = get_comma_header(event.headers, b"connection")
|
74 |
-
if b"close" in connection:
|
75 |
-
return False
|
76 |
-
if getattr(event, "http_version", b"1.1") < b"1.1":
|
77 |
-
return False
|
78 |
-
return True
|
79 |
-
|
80 |
-
|
81 |
-
def _body_framing(
|
82 |
-
request_method: bytes, event: Union[Request, Response]
|
83 |
-
) -> Tuple[str, Union[Tuple[()], Tuple[int]]]:
|
84 |
-
# Called when we enter SEND_BODY to figure out framing information for
|
85 |
-
# this body.
|
86 |
-
#
|
87 |
-
# These are the only two events that can trigger a SEND_BODY state:
|
88 |
-
assert type(event) in (Request, Response)
|
89 |
-
# Returns one of:
|
90 |
-
#
|
91 |
-
# ("content-length", count)
|
92 |
-
# ("chunked", ())
|
93 |
-
# ("http/1.0", ())
|
94 |
-
#
|
95 |
-
# which are (lookup key, *args) for constructing body reader/writer
|
96 |
-
# objects.
|
97 |
-
#
|
98 |
-
# Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3
|
99 |
-
#
|
100 |
-
# Step 1: some responses always have an empty body, regardless of what the
|
101 |
-
# headers say.
|
102 |
-
if type(event) is Response:
|
103 |
-
if (
|
104 |
-
event.status_code in (204, 304)
|
105 |
-
or request_method == b"HEAD"
|
106 |
-
or (request_method == b"CONNECT" and 200 <= event.status_code < 300)
|
107 |
-
):
|
108 |
-
return ("content-length", (0,))
|
109 |
-
# Section 3.3.3 also lists another case -- responses with status_code
|
110 |
-
# < 200. For us these are InformationalResponses, not Responses, so
|
111 |
-
# they can't get into this function in the first place.
|
112 |
-
assert event.status_code >= 200
|
113 |
-
|
114 |
-
# Step 2: check for Transfer-Encoding (T-E beats C-L):
|
115 |
-
transfer_encodings = get_comma_header(event.headers, b"transfer-encoding")
|
116 |
-
if transfer_encodings:
|
117 |
-
assert transfer_encodings == [b"chunked"]
|
118 |
-
return ("chunked", ())
|
119 |
-
|
120 |
-
# Step 3: check for Content-Length
|
121 |
-
content_lengths = get_comma_header(event.headers, b"content-length")
|
122 |
-
if content_lengths:
|
123 |
-
return ("content-length", (int(content_lengths[0]),))
|
124 |
-
|
125 |
-
# Step 4: no applicable headers; fallback/default depends on type
|
126 |
-
if type(event) is Request:
|
127 |
-
return ("content-length", (0,))
|
128 |
-
else:
|
129 |
-
return ("http/1.0", ())
|
130 |
-
|
131 |
-
|
132 |
-
################################################################
|
133 |
-
#
|
134 |
-
# The main Connection class
|
135 |
-
#
|
136 |
-
################################################################
|
137 |
-
|
138 |
-
|
139 |
-
class Connection:
|
140 |
-
"""An object encapsulating the state of an HTTP connection.
|
141 |
-
|
142 |
-
Args:
|
143 |
-
our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If
|
144 |
-
you're implementing a server, pass :data:`h11.SERVER`.
|
145 |
-
|
146 |
-
max_incomplete_event_size (int):
|
147 |
-
The maximum number of bytes we're willing to buffer of an
|
148 |
-
incomplete event. In practice this mostly sets a limit on the
|
149 |
-
maximum size of the request/response line + headers. If this is
|
150 |
-
exceeded, then :meth:`next_event` will raise
|
151 |
-
:exc:`RemoteProtocolError`.
|
152 |
-
|
153 |
-
"""
|
154 |
-
|
155 |
-
def __init__(
|
156 |
-
self,
|
157 |
-
our_role: Type[Sentinel],
|
158 |
-
max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
|
159 |
-
) -> None:
|
160 |
-
self._max_incomplete_event_size = max_incomplete_event_size
|
161 |
-
# State and role tracking
|
162 |
-
if our_role not in (CLIENT, SERVER):
|
163 |
-
raise ValueError("expected CLIENT or SERVER, not {!r}".format(our_role))
|
164 |
-
self.our_role = our_role
|
165 |
-
self.their_role: Type[Sentinel]
|
166 |
-
if our_role is CLIENT:
|
167 |
-
self.their_role = SERVER
|
168 |
-
else:
|
169 |
-
self.their_role = CLIENT
|
170 |
-
self._cstate = ConnectionState()
|
171 |
-
|
172 |
-
# Callables for converting data->events or vice-versa given the
|
173 |
-
# current state
|
174 |
-
self._writer = self._get_io_object(self.our_role, None, WRITERS)
|
175 |
-
self._reader = self._get_io_object(self.their_role, None, READERS)
|
176 |
-
|
177 |
-
# Holds any unprocessed received data
|
178 |
-
self._receive_buffer = ReceiveBuffer()
|
179 |
-
# If this is true, then it indicates that the incoming connection was
|
180 |
-
# closed *after* the end of whatever's in self._receive_buffer:
|
181 |
-
self._receive_buffer_closed = False
|
182 |
-
|
183 |
-
# Extra bits of state that don't fit into the state machine.
|
184 |
-
#
|
185 |
-
# These two are only used to interpret framing headers for figuring
|
186 |
-
# out how to read/write response bodies. their_http_version is also
|
187 |
-
# made available as a convenient public API.
|
188 |
-
self.their_http_version: Optional[bytes] = None
|
189 |
-
self._request_method: Optional[bytes] = None
|
190 |
-
# This is pure flow-control and doesn't at all affect the set of legal
|
191 |
-
# transitions, so no need to bother ConnectionState with it:
|
192 |
-
self.client_is_waiting_for_100_continue = False
|
193 |
-
|
194 |
-
@property
|
195 |
-
def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]:
|
196 |
-
"""A dictionary like::
|
197 |
-
|
198 |
-
{CLIENT: <client state>, SERVER: <server state>}
|
199 |
-
|
200 |
-
See :ref:`state-machine` for details.
|
201 |
-
|
202 |
-
"""
|
203 |
-
return dict(self._cstate.states)
|
204 |
-
|
205 |
-
@property
|
206 |
-
def our_state(self) -> Type[Sentinel]:
|
207 |
-
"""The current state of whichever role we are playing. See
|
208 |
-
:ref:`state-machine` for details.
|
209 |
-
"""
|
210 |
-
return self._cstate.states[self.our_role]
|
211 |
-
|
212 |
-
@property
|
213 |
-
def their_state(self) -> Type[Sentinel]:
|
214 |
-
"""The current state of whichever role we are NOT playing. See
|
215 |
-
:ref:`state-machine` for details.
|
216 |
-
"""
|
217 |
-
return self._cstate.states[self.their_role]
|
218 |
-
|
219 |
-
@property
|
220 |
-
def they_are_waiting_for_100_continue(self) -> bool:
|
221 |
-
return self.their_role is CLIENT and self.client_is_waiting_for_100_continue
|
222 |
-
|
223 |
-
def start_next_cycle(self) -> None:
|
224 |
-
"""Attempt to reset our connection state for a new request/response
|
225 |
-
cycle.
|
226 |
-
|
227 |
-
If both client and server are in :data:`DONE` state, then resets them
|
228 |
-
both to :data:`IDLE` state in preparation for a new request/response
|
229 |
-
cycle on this same connection. Otherwise, raises a
|
230 |
-
:exc:`LocalProtocolError`.
|
231 |
-
|
232 |
-
See :ref:`keepalive-and-pipelining`.
|
233 |
-
|
234 |
-
"""
|
235 |
-
old_states = dict(self._cstate.states)
|
236 |
-
self._cstate.start_next_cycle()
|
237 |
-
self._request_method = None
|
238 |
-
# self.their_http_version gets left alone, since it presumably lasts
|
239 |
-
# beyond a single request/response cycle
|
240 |
-
assert not self.client_is_waiting_for_100_continue
|
241 |
-
self._respond_to_state_changes(old_states)
|
242 |
-
|
243 |
-
def _process_error(self, role: Type[Sentinel]) -> None:
|
244 |
-
old_states = dict(self._cstate.states)
|
245 |
-
self._cstate.process_error(role)
|
246 |
-
self._respond_to_state_changes(old_states)
|
247 |
-
|
248 |
-
def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]:
|
249 |
-
if type(event) is InformationalResponse and event.status_code == 101:
|
250 |
-
return _SWITCH_UPGRADE
|
251 |
-
if type(event) is Response:
|
252 |
-
if (
|
253 |
-
_SWITCH_CONNECT in self._cstate.pending_switch_proposals
|
254 |
-
and 200 <= event.status_code < 300
|
255 |
-
):
|
256 |
-
return _SWITCH_CONNECT
|
257 |
-
return None
|
258 |
-
|
259 |
-
# All events go through here
|
260 |
-
def _process_event(self, role: Type[Sentinel], event: Event) -> None:
|
261 |
-
# First, pass the event through the state machine to make sure it
|
262 |
-
# succeeds.
|
263 |
-
old_states = dict(self._cstate.states)
|
264 |
-
if role is CLIENT and type(event) is Request:
|
265 |
-
if event.method == b"CONNECT":
|
266 |
-
self._cstate.process_client_switch_proposal(_SWITCH_CONNECT)
|
267 |
-
if get_comma_header(event.headers, b"upgrade"):
|
268 |
-
self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE)
|
269 |
-
server_switch_event = None
|
270 |
-
if role is SERVER:
|
271 |
-
server_switch_event = self._server_switch_event(event)
|
272 |
-
self._cstate.process_event(role, type(event), server_switch_event)
|
273 |
-
|
274 |
-
# Then perform the updates triggered by it.
|
275 |
-
|
276 |
-
if type(event) is Request:
|
277 |
-
self._request_method = event.method
|
278 |
-
|
279 |
-
if role is self.their_role and type(event) in (
|
280 |
-
Request,
|
281 |
-
Response,
|
282 |
-
InformationalResponse,
|
283 |
-
):
|
284 |
-
event = cast(Union[Request, Response, InformationalResponse], event)
|
285 |
-
self.their_http_version = event.http_version
|
286 |
-
|
287 |
-
# Keep alive handling
|
288 |
-
#
|
289 |
-
# RFC 7230 doesn't really say what one should do if Connection: close
|
290 |
-
# shows up on a 1xx InformationalResponse. I think the idea is that
|
291 |
-
# this is not supposed to happen. In any case, if it does happen, we
|
292 |
-
# ignore it.
|
293 |
-
if type(event) in (Request, Response) and not _keep_alive(
|
294 |
-
cast(Union[Request, Response], event)
|
295 |
-
):
|
296 |
-
self._cstate.process_keep_alive_disabled()
|
297 |
-
|
298 |
-
# 100-continue
|
299 |
-
if type(event) is Request and has_expect_100_continue(event):
|
300 |
-
self.client_is_waiting_for_100_continue = True
|
301 |
-
if type(event) in (InformationalResponse, Response):
|
302 |
-
self.client_is_waiting_for_100_continue = False
|
303 |
-
if role is CLIENT and type(event) in (Data, EndOfMessage):
|
304 |
-
self.client_is_waiting_for_100_continue = False
|
305 |
-
|
306 |
-
self._respond_to_state_changes(old_states, event)
|
307 |
-
|
308 |
-
def _get_io_object(
|
309 |
-
self,
|
310 |
-
role: Type[Sentinel],
|
311 |
-
event: Optional[Event],
|
312 |
-
io_dict: Union[ReadersType, WritersType],
|
313 |
-
) -> Optional[Callable[..., Any]]:
|
314 |
-
# event may be None; it's only used when entering SEND_BODY
|
315 |
-
state = self._cstate.states[role]
|
316 |
-
if state is SEND_BODY:
|
317 |
-
# Special case: the io_dict has a dict of reader/writer factories
|
318 |
-
# that depend on the request/response framing.
|
319 |
-
framing_type, args = _body_framing(
|
320 |
-
cast(bytes, self._request_method), cast(Union[Request, Response], event)
|
321 |
-
)
|
322 |
-
return io_dict[SEND_BODY][framing_type](*args) # type: ignore[index]
|
323 |
-
else:
|
324 |
-
# General case: the io_dict just has the appropriate reader/writer
|
325 |
-
# for this state
|
326 |
-
return io_dict.get((role, state)) # type: ignore[return-value]
|
327 |
-
|
328 |
-
# This must be called after any action that might have caused
|
329 |
-
# self._cstate.states to change.
|
330 |
-
def _respond_to_state_changes(
|
331 |
-
self,
|
332 |
-
old_states: Dict[Type[Sentinel], Type[Sentinel]],
|
333 |
-
event: Optional[Event] = None,
|
334 |
-
) -> None:
|
335 |
-
# Update reader/writer
|
336 |
-
if self.our_state != old_states[self.our_role]:
|
337 |
-
self._writer = self._get_io_object(self.our_role, event, WRITERS)
|
338 |
-
if self.their_state != old_states[self.their_role]:
|
339 |
-
self._reader = self._get_io_object(self.their_role, event, READERS)
|
340 |
-
|
341 |
-
@property
|
342 |
-
def trailing_data(self) -> Tuple[bytes, bool]:
|
343 |
-
"""Data that has been received, but not yet processed, represented as
|
344 |
-
a tuple with two elements, where the first is a byte-string containing
|
345 |
-
the unprocessed data itself, and the second is a bool that is True if
|
346 |
-
the receive connection was closed.
|
347 |
-
|
348 |
-
See :ref:`switching-protocols` for discussion of why you'd want this.
|
349 |
-
"""
|
350 |
-
return (bytes(self._receive_buffer), self._receive_buffer_closed)
|
351 |
-
|
352 |
-
def receive_data(self, data: bytes) -> None:
|
353 |
-
"""Add data to our internal receive buffer.
|
354 |
-
|
355 |
-
This does not actually do any processing on the data, just stores
|
356 |
-
it. To trigger processing, you have to call :meth:`next_event`.
|
357 |
-
|
358 |
-
Args:
|
359 |
-
data (:term:`bytes-like object`):
|
360 |
-
The new data that was just received.
|
361 |
-
|
362 |
-
Special case: If *data* is an empty byte-string like ``b""``,
|
363 |
-
then this indicates that the remote side has closed the
|
364 |
-
connection (end of file). Normally this is convenient, because
|
365 |
-
standard Python APIs like :meth:`file.read` or
|
366 |
-
:meth:`socket.recv` use ``b""`` to indicate end-of-file, while
|
367 |
-
other failures to read are indicated using other mechanisms
|
368 |
-
like raising :exc:`TimeoutError`. When using such an API you
|
369 |
-
can just blindly pass through whatever you get from ``read``
|
370 |
-
to :meth:`receive_data`, and everything will work.
|
371 |
-
|
372 |
-
But, if you have an API where reading an empty string is a
|
373 |
-
valid non-EOF condition, then you need to be aware of this and
|
374 |
-
make sure to check for such strings and avoid passing them to
|
375 |
-
:meth:`receive_data`.
|
376 |
-
|
377 |
-
Returns:
|
378 |
-
Nothing, but after calling this you should call :meth:`next_event`
|
379 |
-
to parse the newly received data.
|
380 |
-
|
381 |
-
Raises:
|
382 |
-
RuntimeError:
|
383 |
-
Raised if you pass an empty *data*, indicating EOF, and then
|
384 |
-
pass a non-empty *data*, indicating more data that somehow
|
385 |
-
arrived after the EOF.
|
386 |
-
|
387 |
-
(Calling ``receive_data(b"")`` multiple times is fine,
|
388 |
-
and equivalent to calling it once.)
|
389 |
-
|
390 |
-
"""
|
391 |
-
if data:
|
392 |
-
if self._receive_buffer_closed:
|
393 |
-
raise RuntimeError("received close, then received more data?")
|
394 |
-
self._receive_buffer += data
|
395 |
-
else:
|
396 |
-
self._receive_buffer_closed = True
|
397 |
-
|
398 |
-
def _extract_next_receive_event(
|
399 |
-
self,
|
400 |
-
) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
|
401 |
-
state = self.their_state
|
402 |
-
# We don't pause immediately when they enter DONE, because even in
|
403 |
-
# DONE state we can still process a ConnectionClosed() event. But
|
404 |
-
# if we have data in our buffer, then we definitely aren't getting
|
405 |
-
# a ConnectionClosed() immediately and we need to pause.
|
406 |
-
if state is DONE and self._receive_buffer:
|
407 |
-
return PAUSED
|
408 |
-
if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL:
|
409 |
-
return PAUSED
|
410 |
-
assert self._reader is not None
|
411 |
-
event = self._reader(self._receive_buffer)
|
412 |
-
if event is None:
|
413 |
-
if not self._receive_buffer and self._receive_buffer_closed:
|
414 |
-
# In some unusual cases (basically just HTTP/1.0 bodies), EOF
|
415 |
-
# triggers an actual protocol event; in that case, we want to
|
416 |
-
# return that event, and then the state will change and we'll
|
417 |
-
# get called again to generate the actual ConnectionClosed().
|
418 |
-
if hasattr(self._reader, "read_eof"):
|
419 |
-
event = self._reader.read_eof() # type: ignore[attr-defined]
|
420 |
-
else:
|
421 |
-
event = ConnectionClosed()
|
422 |
-
if event is None:
|
423 |
-
event = NEED_DATA
|
424 |
-
return event # type: ignore[no-any-return]
|
425 |
-
|
426 |
-
def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
|
427 |
-
"""Parse the next event out of our receive buffer, update our internal
|
428 |
-
state, and return it.
|
429 |
-
|
430 |
-
This is a mutating operation -- think of it like calling :func:`next`
|
431 |
-
on an iterator.
|
432 |
-
|
433 |
-
Returns:
|
434 |
-
: One of three things:
|
435 |
-
|
436 |
-
1) An event object -- see :ref:`events`.
|
437 |
-
|
438 |
-
2) The special constant :data:`NEED_DATA`, which indicates that
|
439 |
-
you need to read more data from your socket and pass it to
|
440 |
-
:meth:`receive_data` before this method will be able to return
|
441 |
-
any more events.
|
442 |
-
|
443 |
-
3) The special constant :data:`PAUSED`, which indicates that we
|
444 |
-
are not in a state where we can process incoming data (usually
|
445 |
-
because the peer has finished their part of the current
|
446 |
-
request/response cycle, and you have not yet called
|
447 |
-
:meth:`start_next_cycle`). See :ref:`flow-control` for details.
|
448 |
-
|
449 |
-
Raises:
|
450 |
-
RemoteProtocolError:
|
451 |
-
The peer has misbehaved. You should close the connection
|
452 |
-
(possibly after sending some kind of 4xx response).
|
453 |
-
|
454 |
-
Once this method returns :class:`ConnectionClosed` once, then all
|
455 |
-
subsequent calls will also return :class:`ConnectionClosed`.
|
456 |
-
|
457 |
-
If this method raises any exception besides :exc:`RemoteProtocolError`
|
458 |
-
then that's a bug -- if it happens please file a bug report!
|
459 |
-
|
460 |
-
If this method raises any exception then it also sets
|
461 |
-
:attr:`Connection.their_state` to :data:`ERROR` -- see
|
462 |
-
:ref:`error-handling` for discussion.
|
463 |
-
|
464 |
-
"""
|
465 |
-
|
466 |
-
if self.their_state is ERROR:
|
467 |
-
raise RemoteProtocolError("Can't receive data when peer state is ERROR")
|
468 |
-
try:
|
469 |
-
event = self._extract_next_receive_event()
|
470 |
-
if event not in [NEED_DATA, PAUSED]:
|
471 |
-
self._process_event(self.their_role, cast(Event, event))
|
472 |
-
if event is NEED_DATA:
|
473 |
-
if len(self._receive_buffer) > self._max_incomplete_event_size:
|
474 |
-
# 431 is "Request header fields too large" which is pretty
|
475 |
-
# much the only situation where we can get here
|
476 |
-
raise RemoteProtocolError(
|
477 |
-
"Receive buffer too long", error_status_hint=431
|
478 |
-
)
|
479 |
-
if self._receive_buffer_closed:
|
480 |
-
# We're still trying to complete some event, but that's
|
481 |
-
# never going to happen because no more data is coming
|
482 |
-
raise RemoteProtocolError("peer unexpectedly closed connection")
|
483 |
-
return event
|
484 |
-
except BaseException as exc:
|
485 |
-
self._process_error(self.their_role)
|
486 |
-
if isinstance(exc, LocalProtocolError):
|
487 |
-
exc._reraise_as_remote_protocol_error()
|
488 |
-
else:
|
489 |
-
raise
|
490 |
-
|
491 |
-
def send(self, event: Event) -> Optional[bytes]:
|
492 |
-
"""Convert a high-level event into bytes that can be sent to the peer,
|
493 |
-
while updating our internal state machine.
|
494 |
-
|
495 |
-
Args:
|
496 |
-
event: The :ref:`event <events>` to send.
|
497 |
-
|
498 |
-
Returns:
|
499 |
-
If ``type(event) is ConnectionClosed``, then returns
|
500 |
-
``None``. Otherwise, returns a :term:`bytes-like object`.
|
501 |
-
|
502 |
-
Raises:
|
503 |
-
LocalProtocolError:
|
504 |
-
Sending this event at this time would violate our
|
505 |
-
understanding of the HTTP/1.1 protocol.
|
506 |
-
|
507 |
-
If this method raises any exception then it also sets
|
508 |
-
:attr:`Connection.our_state` to :data:`ERROR` -- see
|
509 |
-
:ref:`error-handling` for discussion.
|
510 |
-
|
511 |
-
"""
|
512 |
-
data_list = self.send_with_data_passthrough(event)
|
513 |
-
if data_list is None:
|
514 |
-
return None
|
515 |
-
else:
|
516 |
-
return b"".join(data_list)
|
517 |
-
|
518 |
-
def send_with_data_passthrough(self, event: Event) -> Optional[List[bytes]]:
|
519 |
-
"""Identical to :meth:`send`, except that in situations where
|
520 |
-
:meth:`send` returns a single :term:`bytes-like object`, this instead
|
521 |
-
returns a list of them -- and when sending a :class:`Data` event, this
|
522 |
-
list is guaranteed to contain the exact object you passed in as
|
523 |
-
:attr:`Data.data`. See :ref:`sendfile` for discussion.
|
524 |
-
|
525 |
-
"""
|
526 |
-
if self.our_state is ERROR:
|
527 |
-
raise LocalProtocolError("Can't send data when our state is ERROR")
|
528 |
-
try:
|
529 |
-
if type(event) is Response:
|
530 |
-
event = self._clean_up_response_headers_for_sending(event)
|
531 |
-
# We want to call _process_event before calling the writer,
|
532 |
-
# because if someone tries to do something invalid then this will
|
533 |
-
# give a sensible error message, while our writers all just assume
|
534 |
-
# they will only receive valid events. But, _process_event might
|
535 |
-
# change self._writer. So we have to do a little dance:
|
536 |
-
writer = self._writer
|
537 |
-
self._process_event(self.our_role, event)
|
538 |
-
if type(event) is ConnectionClosed:
|
539 |
-
return None
|
540 |
-
else:
|
541 |
-
# In any situation where writer is None, process_event should
|
542 |
-
# have raised ProtocolError
|
543 |
-
assert writer is not None
|
544 |
-
data_list: List[bytes] = []
|
545 |
-
writer(event, data_list.append)
|
546 |
-
return data_list
|
547 |
-
except:
|
548 |
-
self._process_error(self.our_role)
|
549 |
-
raise
|
550 |
-
|
551 |
-
def send_failed(self) -> None:
|
552 |
-
"""Notify the state machine that we failed to send the data it gave
|
553 |
-
us.
|
554 |
-
|
555 |
-
This causes :attr:`Connection.our_state` to immediately become
|
556 |
-
:data:`ERROR` -- see :ref:`error-handling` for discussion.
|
557 |
-
|
558 |
-
"""
|
559 |
-
self._process_error(self.our_role)
|
560 |
-
|
561 |
-
# When sending a Response, we take responsibility for a few things:
|
562 |
-
#
|
563 |
-
# - Sometimes you MUST set Connection: close. We take care of those
|
564 |
-
# times. (You can also set it yourself if you want, and if you do then
|
565 |
-
# we'll respect that and close the connection at the right time. But you
|
566 |
-
# don't have to worry about that unless you want to.)
|
567 |
-
#
|
568 |
-
# - The user has to set Content-Length if they want it. Otherwise, for
|
569 |
-
# responses that have bodies (e.g. not HEAD), then we will automatically
|
570 |
-
# select the right mechanism for streaming a body of unknown length,
|
571 |
-
# which depends on depending on the peer's HTTP version.
|
572 |
-
#
|
573 |
-
# This function's *only* responsibility is making sure headers are set up
|
574 |
-
# right -- everything downstream just looks at the headers. There are no
|
575 |
-
# side channels.
|
576 |
-
def _clean_up_response_headers_for_sending(self, response: Response) -> Response:
|
577 |
-
assert type(response) is Response
|
578 |
-
|
579 |
-
headers = response.headers
|
580 |
-
need_close = False
|
581 |
-
|
582 |
-
# HEAD requests need some special handling: they always act like they
|
583 |
-
# have Content-Length: 0, and that's how _body_framing treats
|
584 |
-
# them. But their headers are supposed to match what we would send if
|
585 |
-
# the request was a GET. (Technically there is one deviation allowed:
|
586 |
-
# we're allowed to leave out the framing headers -- see
|
587 |
-
# https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as
|
588 |
-
# easy to get them right.)
|
589 |
-
method_for_choosing_headers = cast(bytes, self._request_method)
|
590 |
-
if method_for_choosing_headers == b"HEAD":
|
591 |
-
method_for_choosing_headers = b"GET"
|
592 |
-
framing_type, _ = _body_framing(method_for_choosing_headers, response)
|
593 |
-
if framing_type in ("chunked", "http/1.0"):
|
594 |
-
# This response has a body of unknown length.
|
595 |
-
# If our peer is HTTP/1.1, we use Transfer-Encoding: chunked
|
596 |
-
# If our peer is HTTP/1.0, we use no framing headers, and close the
|
597 |
-
# connection afterwards.
|
598 |
-
#
|
599 |
-
# Make sure to clear Content-Length (in principle user could have
|
600 |
-
# set both and then we ignored Content-Length b/c
|
601 |
-
# Transfer-Encoding overwrote it -- this would be naughty of them,
|
602 |
-
# but the HTTP spec says that if our peer does this then we have
|
603 |
-
# to fix it instead of erroring out, so we'll accord the user the
|
604 |
-
# same respect).
|
605 |
-
headers = set_comma_header(headers, b"content-length", [])
|
606 |
-
if self.their_http_version is None or self.their_http_version < b"1.1":
|
607 |
-
# Either we never got a valid request and are sending back an
|
608 |
-
# error (their_http_version is None), so we assume the worst;
|
609 |
-
# or else we did get a valid HTTP/1.0 request, so we know that
|
610 |
-
# they don't understand chunked encoding.
|
611 |
-
headers = set_comma_header(headers, b"transfer-encoding", [])
|
612 |
-
# This is actually redundant ATM, since currently we
|
613 |
-
# unconditionally disable keep-alive when talking to HTTP/1.0
|
614 |
-
# peers. But let's be defensive just in case we add
|
615 |
-
# Connection: keep-alive support later:
|
616 |
-
if self._request_method != b"HEAD":
|
617 |
-
need_close = True
|
618 |
-
else:
|
619 |
-
headers = set_comma_header(headers, b"transfer-encoding", [b"chunked"])
|
620 |
-
|
621 |
-
if not self._cstate.keep_alive or need_close:
|
622 |
-
# Make sure Connection: close is set
|
623 |
-
connection = set(get_comma_header(headers, b"connection"))
|
624 |
-
connection.discard(b"keep-alive")
|
625 |
-
connection.add(b"close")
|
626 |
-
headers = set_comma_header(headers, b"connection", sorted(connection))
|
627 |
-
|
628 |
-
return Response(
|
629 |
-
headers=headers,
|
630 |
-
status_code=response.status_code,
|
631 |
-
http_version=response.http_version,
|
632 |
-
reason=response.reason,
|
633 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DaFujaTyping/hf-Chat-ui/src/lib/utils/analytics.ts
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
export interface GAEvent {
|
2 |
-
hitType: "event";
|
3 |
-
eventCategory: string;
|
4 |
-
eventAction: string;
|
5 |
-
eventLabel?: string;
|
6 |
-
eventValue?: number;
|
7 |
-
}
|
8 |
-
|
9 |
-
// Send a Google Analytics event
|
10 |
-
export function sendAnalyticsEvent({
|
11 |
-
eventCategory,
|
12 |
-
eventAction,
|
13 |
-
eventLabel,
|
14 |
-
eventValue,
|
15 |
-
}: Omit<GAEvent, "hitType">): void {
|
16 |
-
// Mandatory fields
|
17 |
-
const event: GAEvent = {
|
18 |
-
hitType: "event",
|
19 |
-
eventCategory,
|
20 |
-
eventAction,
|
21 |
-
};
|
22 |
-
// Optional fields
|
23 |
-
if (eventLabel) {
|
24 |
-
event.eventLabel = eventLabel;
|
25 |
-
}
|
26 |
-
if (eventValue) {
|
27 |
-
event.eventValue = eventValue;
|
28 |
-
}
|
29 |
-
|
30 |
-
// @ts-expect-error typescript doesn't know gtag is on the window object
|
31 |
-
if (!!window?.gtag && typeof window?.gtag === "function") {
|
32 |
-
// @ts-expect-error typescript doesn't know gtag is on the window object
|
33 |
-
window?.gtag("event", eventAction, {
|
34 |
-
event_category: event.eventCategory,
|
35 |
-
event_label: event.eventLabel,
|
36 |
-
value: event.eventValue,
|
37 |
-
});
|
38 |
-
}
|
39 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DaleChen/AutoGPT/autogpt/memory/pinecone.py
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
import pinecone
|
2 |
-
from colorama import Fore, Style
|
3 |
-
|
4 |
-
from autogpt.llm_utils import create_embedding_with_ada
|
5 |
-
from autogpt.logs import logger
|
6 |
-
from autogpt.memory.base import MemoryProviderSingleton
|
7 |
-
|
8 |
-
|
9 |
-
class PineconeMemory(MemoryProviderSingleton):
|
10 |
-
def __init__(self, cfg):
|
11 |
-
pinecone_api_key = cfg.pinecone_api_key
|
12 |
-
pinecone_region = cfg.pinecone_region
|
13 |
-
pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
|
14 |
-
dimension = 1536
|
15 |
-
metric = "cosine"
|
16 |
-
pod_type = "p1"
|
17 |
-
table_name = "auto-gpt"
|
18 |
-
# this assumes we don't start with memory.
|
19 |
-
# for now this works.
|
20 |
-
# we'll need a more complicated and robust system if we want to start with
|
21 |
-
# memory.
|
22 |
-
self.vec_num = 0
|
23 |
-
|
24 |
-
try:
|
25 |
-
pinecone.whoami()
|
26 |
-
except Exception as e:
|
27 |
-
logger.typewriter_log(
|
28 |
-
"FAILED TO CONNECT TO PINECONE",
|
29 |
-
Fore.RED,
|
30 |
-
Style.BRIGHT + str(e) + Style.RESET_ALL,
|
31 |
-
)
|
32 |
-
logger.double_check(
|
33 |
-
"Please ensure you have setup and configured Pinecone properly for use."
|
34 |
-
+ f"You can check out {Fore.CYAN + Style.BRIGHT}"
|
35 |
-
"https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup"
|
36 |
-
f"{Style.RESET_ALL} to ensure you've set up everything correctly."
|
37 |
-
)
|
38 |
-
exit(1)
|
39 |
-
|
40 |
-
if table_name not in pinecone.list_indexes():
|
41 |
-
pinecone.create_index(
|
42 |
-
table_name, dimension=dimension, metric=metric, pod_type=pod_type
|
43 |
-
)
|
44 |
-
self.index = pinecone.Index(table_name)
|
45 |
-
|
46 |
-
def add(self, data):
|
47 |
-
vector = create_embedding_with_ada(data)
|
48 |
-
# no metadata here. We may wish to change that long term.
|
49 |
-
self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
|
50 |
-
_text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
|
51 |
-
self.vec_num += 1
|
52 |
-
return _text
|
53 |
-
|
54 |
-
def get(self, data):
|
55 |
-
return self.get_relevant(data, 1)
|
56 |
-
|
57 |
-
def clear(self):
|
58 |
-
self.index.delete(deleteAll=True)
|
59 |
-
return "Obliviated"
|
60 |
-
|
61 |
-
def get_relevant(self, data, num_relevant=5):
|
62 |
-
"""
|
63 |
-
Returns all the data in the memory that is relevant to the given data.
|
64 |
-
:param data: The data to compare to.
|
65 |
-
:param num_relevant: The number of relevant data to return. Defaults to 5
|
66 |
-
"""
|
67 |
-
query_embedding = create_embedding_with_ada(data)
|
68 |
-
results = self.index.query(
|
69 |
-
query_embedding, top_k=num_relevant, include_metadata=True
|
70 |
-
)
|
71 |
-
sorted_results = sorted(results.matches, key=lambda x: x.score)
|
72 |
-
return [str(item["metadata"]["raw_text"]) for item in sorted_results]
|
73 |
-
|
74 |
-
def get_stats(self):
|
75 |
-
return self.index.describe_index_stats()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Detomo/ai-comic-generation/src/components/ui/command.tsx
DELETED
@@ -1,155 +0,0 @@
|
|
1 |
-
"use client"
|
2 |
-
|
3 |
-
import * as React from "react"
|
4 |
-
import { DialogProps } from "@radix-ui/react-dialog"
|
5 |
-
import { Command as CommandPrimitive } from "cmdk"
|
6 |
-
import { Search } from "lucide-react"
|
7 |
-
|
8 |
-
import { cn } from "@/lib/utils"
|
9 |
-
import { Dialog, DialogContent } from "@/components/ui/dialog"
|
10 |
-
|
11 |
-
const Command = React.forwardRef<
|
12 |
-
React.ElementRef<typeof CommandPrimitive>,
|
13 |
-
React.ComponentPropsWithoutRef<typeof CommandPrimitive>
|
14 |
-
>(({ className, ...props }, ref) => (
|
15 |
-
<CommandPrimitive
|
16 |
-
ref={ref}
|
17 |
-
className={cn(
|
18 |
-
"flex h-full w-full flex-col overflow-hidden rounded-md bg-white text-stone-950 dark:bg-stone-950 dark:text-stone-50",
|
19 |
-
className
|
20 |
-
)}
|
21 |
-
{...props}
|
22 |
-
/>
|
23 |
-
))
|
24 |
-
Command.displayName = CommandPrimitive.displayName
|
25 |
-
|
26 |
-
interface CommandDialogProps extends DialogProps {}
|
27 |
-
|
28 |
-
const CommandDialog = ({ children, ...props }: CommandDialogProps) => {
|
29 |
-
return (
|
30 |
-
<Dialog {...props}>
|
31 |
-
<DialogContent className="overflow-hidden p-0 shadow-lg">
|
32 |
-
<Command className="[&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:font-medium [&_[cmdk-group-heading]]:text-stone-500 [&_[cmdk-group]:not([hidden])_~[cmdk-group]]:pt-0 [&_[cmdk-group]]:px-2 [&_[cmdk-input-wrapper]_svg]:h-5 [&_[cmdk-input-wrapper]_svg]:w-5 [&_[cmdk-input]]:h-12 [&_[cmdk-item]]:px-2 [&_[cmdk-item]]:py-3 [&_[cmdk-item]_svg]:h-5 [&_[cmdk-item]_svg]:w-5 dark:[&_[cmdk-group-heading]]:text-stone-400">
|
33 |
-
{children}
|
34 |
-
</Command>
|
35 |
-
</DialogContent>
|
36 |
-
</Dialog>
|
37 |
-
)
|
38 |
-
}
|
39 |
-
|
40 |
-
const CommandInput = React.forwardRef<
|
41 |
-
React.ElementRef<typeof CommandPrimitive.Input>,
|
42 |
-
React.ComponentPropsWithoutRef<typeof CommandPrimitive.Input>
|
43 |
-
>(({ className, ...props }, ref) => (
|
44 |
-
<div className="flex items-center border-b px-3" cmdk-input-wrapper="">
|
45 |
-
<Search className="mr-2 h-4 w-4 shrink-0 opacity-50" />
|
46 |
-
<CommandPrimitive.Input
|
47 |
-
ref={ref}
|
48 |
-
className={cn(
|
49 |
-
"flex h-11 w-full rounded-md bg-transparent py-3 text-sm outline-none placeholder:text-stone-500 disabled:cursor-not-allowed disabled:opacity-50 dark:placeholder:text-stone-400",
|
50 |
-
className
|
51 |
-
)}
|
52 |
-
{...props}
|
53 |
-
/>
|
54 |
-
</div>
|
55 |
-
))
|
56 |
-
|
57 |
-
CommandInput.displayName = CommandPrimitive.Input.displayName
|
58 |
-
|
59 |
-
const CommandList = React.forwardRef<
|
60 |
-
React.ElementRef<typeof CommandPrimitive.List>,
|
61 |
-
React.ComponentPropsWithoutRef<typeof CommandPrimitive.List>
|
62 |
-
>(({ className, ...props }, ref) => (
|
63 |
-
<CommandPrimitive.List
|
64 |
-
ref={ref}
|
65 |
-
className={cn("max-h-[300px] overflow-y-auto overflow-x-hidden", className)}
|
66 |
-
{...props}
|
67 |
-
/>
|
68 |
-
))
|
69 |
-
|
70 |
-
CommandList.displayName = CommandPrimitive.List.displayName
|
71 |
-
|
72 |
-
const CommandEmpty = React.forwardRef<
|
73 |
-
React.ElementRef<typeof CommandPrimitive.Empty>,
|
74 |
-
React.ComponentPropsWithoutRef<typeof CommandPrimitive.Empty>
|
75 |
-
>((props, ref) => (
|
76 |
-
<CommandPrimitive.Empty
|
77 |
-
ref={ref}
|
78 |
-
className="py-6 text-center text-sm"
|
79 |
-
{...props}
|
80 |
-
/>
|
81 |
-
))
|
82 |
-
|
83 |
-
CommandEmpty.displayName = CommandPrimitive.Empty.displayName
|
84 |
-
|
85 |
-
const CommandGroup = React.forwardRef<
|
86 |
-
React.ElementRef<typeof CommandPrimitive.Group>,
|
87 |
-
React.ComponentPropsWithoutRef<typeof CommandPrimitive.Group>
|
88 |
-
>(({ className, ...props }, ref) => (
|
89 |
-
<CommandPrimitive.Group
|
90 |
-
ref={ref}
|
91 |
-
className={cn(
|
92 |
-
"overflow-hidden p-1 text-stone-950 [&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:py-1.5 [&_[cmdk-group-heading]]:text-xs [&_[cmdk-group-heading]]:font-medium [&_[cmdk-group-heading]]:text-stone-500 dark:text-stone-50 dark:[&_[cmdk-group-heading]]:text-stone-400",
|
93 |
-
className
|
94 |
-
)}
|
95 |
-
{...props}
|
96 |
-
/>
|
97 |
-
))
|
98 |
-
|
99 |
-
CommandGroup.displayName = CommandPrimitive.Group.displayName
|
100 |
-
|
101 |
-
const CommandSeparator = React.forwardRef<
|
102 |
-
React.ElementRef<typeof CommandPrimitive.Separator>,
|
103 |
-
React.ComponentPropsWithoutRef<typeof CommandPrimitive.Separator>
|
104 |
-
>(({ className, ...props }, ref) => (
|
105 |
-
<CommandPrimitive.Separator
|
106 |
-
ref={ref}
|
107 |
-
className={cn("-mx-1 h-px bg-stone-200 dark:bg-stone-800", className)}
|
108 |
-
{...props}
|
109 |
-
/>
|
110 |
-
))
|
111 |
-
CommandSeparator.displayName = CommandPrimitive.Separator.displayName
|
112 |
-
|
113 |
-
const CommandItem = React.forwardRef<
|
114 |
-
React.ElementRef<typeof CommandPrimitive.Item>,
|
115 |
-
React.ComponentPropsWithoutRef<typeof CommandPrimitive.Item>
|
116 |
-
>(({ className, ...props }, ref) => (
|
117 |
-
<CommandPrimitive.Item
|
118 |
-
ref={ref}
|
119 |
-
className={cn(
|
120 |
-
"relative flex cursor-default select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none aria-selected:bg-stone-100 aria-selected:text-stone-900 data-[disabled]:pointer-events-none data-[disabled]:opacity-50 dark:aria-selected:bg-stone-800 dark:aria-selected:text-stone-50",
|
121 |
-
className
|
122 |
-
)}
|
123 |
-
{...props}
|
124 |
-
/>
|
125 |
-
))
|
126 |
-
|
127 |
-
CommandItem.displayName = CommandPrimitive.Item.displayName
|
128 |
-
|
129 |
-
const CommandShortcut = ({
|
130 |
-
className,
|
131 |
-
...props
|
132 |
-
}: React.HTMLAttributes<HTMLSpanElement>) => {
|
133 |
-
return (
|
134 |
-
<span
|
135 |
-
className={cn(
|
136 |
-
"ml-auto text-xs tracking-widest text-stone-500 dark:text-stone-400",
|
137 |
-
className
|
138 |
-
)}
|
139 |
-
{...props}
|
140 |
-
/>
|
141 |
-
)
|
142 |
-
}
|
143 |
-
CommandShortcut.displayName = "CommandShortcut"
|
144 |
-
|
145 |
-
export {
|
146 |
-
Command,
|
147 |
-
CommandDialog,
|
148 |
-
CommandInput,
|
149 |
-
CommandList,
|
150 |
-
CommandEmpty,
|
151 |
-
CommandGroup,
|
152 |
-
CommandItem,
|
153 |
-
CommandShortcut,
|
154 |
-
CommandSeparator,
|
155 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/bias_act.h
DELETED
@@ -1,40 +0,0 @@
|
|
1 |
-
// Copyright (c) SenseTime Research. All rights reserved.
|
2 |
-
|
3 |
-
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
4 |
-
//
|
5 |
-
// NVIDIA CORPORATION and its licensors retain all intellectual property
|
6 |
-
// and proprietary rights in and to this software, related documentation
|
7 |
-
// and any modifications thereto. Any use, reproduction, disclosure or
|
8 |
-
// distribution of this software and related documentation without an express
|
9 |
-
// license agreement from NVIDIA CORPORATION is strictly prohibited.
|
10 |
-
|
11 |
-
//------------------------------------------------------------------------
|
12 |
-
// CUDA kernel parameters.
|
13 |
-
|
14 |
-
struct bias_act_kernel_params
|
15 |
-
{
|
16 |
-
const void* x; // [sizeX]
|
17 |
-
const void* b; // [sizeB] or NULL
|
18 |
-
const void* xref; // [sizeX] or NULL
|
19 |
-
const void* yref; // [sizeX] or NULL
|
20 |
-
const void* dy; // [sizeX] or NULL
|
21 |
-
void* y; // [sizeX]
|
22 |
-
|
23 |
-
int grad;
|
24 |
-
int act;
|
25 |
-
float alpha;
|
26 |
-
float gain;
|
27 |
-
float clamp;
|
28 |
-
|
29 |
-
int sizeX;
|
30 |
-
int sizeB;
|
31 |
-
int stepB;
|
32 |
-
int loopX;
|
33 |
-
};
|
34 |
-
|
35 |
-
//------------------------------------------------------------------------
|
36 |
-
// CUDA kernel selection.
|
37 |
-
|
38 |
-
template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p);
|
39 |
-
|
40 |
-
//------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/EPFL-VILAB/MultiMAE/multimae/output_adapter_utils.py
DELETED
@@ -1,290 +0,0 @@
|
|
1 |
-
# Copyright (c) EPFL VILAB.
|
2 |
-
# All rights reserved.
|
3 |
-
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
# --------------------------------------------------------
|
7 |
-
# Based on timm, DPT and ConvNeXt code bases
|
8 |
-
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
|
9 |
-
# https://github.com/isl-org/DPT
|
10 |
-
# https://github.com/facebookresearch/ConvNeXt
|
11 |
-
# --------------------------------------------------------
|
12 |
-
|
13 |
-
import torch
|
14 |
-
import torch.nn as nn
|
15 |
-
|
16 |
-
from .multimae_utils import DropPath
|
17 |
-
|
18 |
-
|
19 |
-
class ConvNeXtBlock(nn.Module):
|
20 |
-
r"""ConvNeXt Block. There are two equivalent implementations:
|
21 |
-
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
|
22 |
-
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
|
23 |
-
We use (2) as we find it slightly faster in PyTorch
|
24 |
-
|
25 |
-
Args:
|
26 |
-
dim (int): Number of input channels.
|
27 |
-
drop_path: Stochastic depth rate. Default: 0.0
|
28 |
-
layer_scale_init_value (float): Init value for Layer Scale. Default: 0 (disabled for isotropic ConvNeXt).
|
29 |
-
|
30 |
-
Code from: https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py
|
31 |
-
"""
|
32 |
-
|
33 |
-
def __init__(self, dim, drop_path=0., layer_scale_init_value=0.):
|
34 |
-
super().__init__()
|
35 |
-
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
|
36 |
-
self.norm = nn.LayerNorm(dim, eps=1e-6)
|
37 |
-
self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
|
38 |
-
self.act = nn.GELU()
|
39 |
-
self.pwconv2 = nn.Linear(4 * dim, dim)
|
40 |
-
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
|
41 |
-
requires_grad=True) if layer_scale_init_value > 0 else None
|
42 |
-
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
43 |
-
|
44 |
-
def forward(self, x):
|
45 |
-
input = x
|
46 |
-
x = self.dwconv(x)
|
47 |
-
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
|
48 |
-
x = self.norm(x)
|
49 |
-
x = self.pwconv1(x)
|
50 |
-
x = self.act(x)
|
51 |
-
x = self.pwconv2(x)
|
52 |
-
if self.gamma is not None:
|
53 |
-
x = self.gamma * x
|
54 |
-
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
|
55 |
-
|
56 |
-
x = input + self.drop_path(x)
|
57 |
-
return x
|
58 |
-
|
59 |
-
|
60 |
-
class ResidualConvUnit_custom(nn.Module):
|
61 |
-
"""Residual convolution module."""
|
62 |
-
|
63 |
-
def __init__(self, features, activation, bn):
|
64 |
-
"""Init.
|
65 |
-
Args:
|
66 |
-
features (int): number of features
|
67 |
-
"""
|
68 |
-
super().__init__()
|
69 |
-
|
70 |
-
self.bn = bn
|
71 |
-
|
72 |
-
self.groups = 1
|
73 |
-
|
74 |
-
self.conv1 = nn.Conv2d(
|
75 |
-
features,
|
76 |
-
features,
|
77 |
-
kernel_size=3,
|
78 |
-
stride=1,
|
79 |
-
padding=1,
|
80 |
-
bias=not self.bn,
|
81 |
-
groups=self.groups,
|
82 |
-
)
|
83 |
-
|
84 |
-
self.conv2 = nn.Conv2d(
|
85 |
-
features,
|
86 |
-
features,
|
87 |
-
kernel_size=3,
|
88 |
-
stride=1,
|
89 |
-
padding=1,
|
90 |
-
bias=not self.bn,
|
91 |
-
groups=self.groups,
|
92 |
-
)
|
93 |
-
|
94 |
-
if self.bn == True:
|
95 |
-
self.bn1 = nn.BatchNorm2d(features)
|
96 |
-
self.bn2 = nn.BatchNorm2d(features)
|
97 |
-
|
98 |
-
self.activation = activation
|
99 |
-
|
100 |
-
self.skip_add = nn.quantized.FloatFunctional()
|
101 |
-
|
102 |
-
def forward(self, x):
|
103 |
-
"""Forward pass.
|
104 |
-
Args:
|
105 |
-
x (tensor): input
|
106 |
-
Returns:
|
107 |
-
tensor: output
|
108 |
-
"""
|
109 |
-
|
110 |
-
out = self.activation(x)
|
111 |
-
out = self.conv1(out)
|
112 |
-
if self.bn == True:
|
113 |
-
out = self.bn1(out)
|
114 |
-
|
115 |
-
out = self.activation(out)
|
116 |
-
out = self.conv2(out)
|
117 |
-
if self.bn == True:
|
118 |
-
out = self.bn2(out)
|
119 |
-
|
120 |
-
if self.groups > 1:
|
121 |
-
out = self.conv_merge(out)
|
122 |
-
|
123 |
-
return self.skip_add.add(out, x)
|
124 |
-
|
125 |
-
def make_scratch(in_shape, out_shape, groups=1, expand=False):
|
126 |
-
scratch = nn.Module()
|
127 |
-
|
128 |
-
out_shape1 = out_shape
|
129 |
-
out_shape2 = out_shape
|
130 |
-
out_shape3 = out_shape
|
131 |
-
out_shape4 = out_shape
|
132 |
-
if expand == True:
|
133 |
-
out_shape1 = out_shape
|
134 |
-
out_shape2 = out_shape * 2
|
135 |
-
out_shape3 = out_shape * 4
|
136 |
-
out_shape4 = out_shape * 8
|
137 |
-
|
138 |
-
scratch.layer1_rn = nn.Conv2d(
|
139 |
-
in_shape[0],
|
140 |
-
out_shape1,
|
141 |
-
kernel_size=3,
|
142 |
-
stride=1,
|
143 |
-
padding=1,
|
144 |
-
bias=False,
|
145 |
-
groups=groups,
|
146 |
-
)
|
147 |
-
scratch.layer2_rn = nn.Conv2d(
|
148 |
-
in_shape[1],
|
149 |
-
out_shape2,
|
150 |
-
kernel_size=3,
|
151 |
-
stride=1,
|
152 |
-
padding=1,
|
153 |
-
bias=False,
|
154 |
-
groups=groups,
|
155 |
-
)
|
156 |
-
scratch.layer3_rn = nn.Conv2d(
|
157 |
-
in_shape[2],
|
158 |
-
out_shape3,
|
159 |
-
kernel_size=3,
|
160 |
-
stride=1,
|
161 |
-
padding=1,
|
162 |
-
bias=False,
|
163 |
-
groups=groups,
|
164 |
-
)
|
165 |
-
scratch.layer4_rn = nn.Conv2d(
|
166 |
-
in_shape[3],
|
167 |
-
out_shape4,
|
168 |
-
kernel_size=3,
|
169 |
-
stride=1,
|
170 |
-
padding=1,
|
171 |
-
bias=False,
|
172 |
-
groups=groups,
|
173 |
-
)
|
174 |
-
|
175 |
-
scratch.layer_rn = nn.ModuleList([
|
176 |
-
scratch.layer1_rn,
|
177 |
-
scratch.layer2_rn,
|
178 |
-
scratch.layer3_rn,
|
179 |
-
scratch.layer4_rn,
|
180 |
-
])
|
181 |
-
|
182 |
-
return scratch
|
183 |
-
|
184 |
-
class FeatureFusionBlock_custom(nn.Module):
|
185 |
-
"""Feature fusion block."""
|
186 |
-
|
187 |
-
def __init__(
|
188 |
-
self,
|
189 |
-
features,
|
190 |
-
activation,
|
191 |
-
deconv=False,
|
192 |
-
bn=False,
|
193 |
-
expand=False,
|
194 |
-
align_corners=True,
|
195 |
-
):
|
196 |
-
"""Init.
|
197 |
-
Args:
|
198 |
-
features (int): number of features
|
199 |
-
"""
|
200 |
-
super(FeatureFusionBlock_custom, self).__init__()
|
201 |
-
|
202 |
-
self.deconv = deconv
|
203 |
-
self.align_corners = align_corners
|
204 |
-
|
205 |
-
self.groups = 1
|
206 |
-
|
207 |
-
self.expand = expand
|
208 |
-
out_features = features
|
209 |
-
if self.expand == True:
|
210 |
-
out_features = features // 2
|
211 |
-
|
212 |
-
self.out_conv = nn.Conv2d(
|
213 |
-
features,
|
214 |
-
out_features,
|
215 |
-
kernel_size=1,
|
216 |
-
stride=1,
|
217 |
-
padding=0,
|
218 |
-
bias=True,
|
219 |
-
groups=1,
|
220 |
-
)
|
221 |
-
|
222 |
-
self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
|
223 |
-
self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
|
224 |
-
|
225 |
-
self.skip_add = nn.quantized.FloatFunctional()
|
226 |
-
|
227 |
-
def forward(self, *xs):
|
228 |
-
"""Forward pass.
|
229 |
-
Returns:
|
230 |
-
tensor: output
|
231 |
-
"""
|
232 |
-
output = xs[0]
|
233 |
-
|
234 |
-
if len(xs) == 2:
|
235 |
-
res = self.resConfUnit1(xs[1])
|
236 |
-
output = self.skip_add.add(output, res)
|
237 |
-
# output += res
|
238 |
-
|
239 |
-
output = self.resConfUnit2(output)
|
240 |
-
|
241 |
-
output = nn.functional.interpolate(
|
242 |
-
output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
|
243 |
-
)
|
244 |
-
|
245 |
-
output = self.out_conv(output)
|
246 |
-
|
247 |
-
return output
|
248 |
-
|
249 |
-
def make_fusion_block(features, use_bn):
|
250 |
-
return FeatureFusionBlock_custom(
|
251 |
-
features,
|
252 |
-
nn.ReLU(False),
|
253 |
-
deconv=False,
|
254 |
-
bn=use_bn,
|
255 |
-
expand=False,
|
256 |
-
align_corners=True,
|
257 |
-
)
|
258 |
-
|
259 |
-
class Interpolate(nn.Module):
|
260 |
-
"""Interpolation module."""
|
261 |
-
|
262 |
-
def __init__(self, scale_factor, mode, align_corners=False):
|
263 |
-
"""Init.
|
264 |
-
Args:
|
265 |
-
scale_factor (float): scaling
|
266 |
-
mode (str): interpolation mode
|
267 |
-
"""
|
268 |
-
super(Interpolate, self).__init__()
|
269 |
-
|
270 |
-
self.interp = nn.functional.interpolate
|
271 |
-
self.scale_factor = scale_factor
|
272 |
-
self.mode = mode
|
273 |
-
self.align_corners = align_corners
|
274 |
-
|
275 |
-
def forward(self, x):
|
276 |
-
"""Forward pass.
|
277 |
-
Args:
|
278 |
-
x (tensor): input
|
279 |
-
Returns:
|
280 |
-
tensor: interpolated data
|
281 |
-
"""
|
282 |
-
|
283 |
-
x = self.interp(
|
284 |
-
x,
|
285 |
-
scale_factor=self.scale_factor,
|
286 |
-
mode=self.mode,
|
287 |
-
align_corners=self.align_corners,
|
288 |
-
)
|
289 |
-
|
290 |
-
return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Edward-Ji/essentials-of-microeconomics/essentials_of_microeconomics/elasticity.py
DELETED
@@ -1,338 +0,0 @@
|
|
1 |
-
from dataclasses import dataclass
|
2 |
-
|
3 |
-
import matplotlib as mpl
|
4 |
-
import matplotlib.pyplot as plt
|
5 |
-
import numpy as np
|
6 |
-
import pandas as pd
|
7 |
-
from shiny import module, reactive, render, req, ui
|
8 |
-
from sympy import (
|
9 |
-
Eq,
|
10 |
-
S,
|
11 |
-
Symbol,
|
12 |
-
diff,
|
13 |
-
evaluate,
|
14 |
-
latex,
|
15 |
-
plot,
|
16 |
-
simplify,
|
17 |
-
solve,
|
18 |
-
symbols,
|
19 |
-
zoo,
|
20 |
-
)
|
21 |
-
|
22 |
-
from util import latex_approx, parse_expr_safer
|
23 |
-
|
24 |
-
symbol_epsilon: Symbol = symbols("varepsilon")
|
25 |
-
|
26 |
-
|
27 |
-
@dataclass(frozen=True)
|
28 |
-
class ApplicationInfo:
|
29 |
-
name: str
|
30 |
-
definition: str
|
31 |
-
symbol_epsilon: Symbol
|
32 |
-
symbol_x: Symbol
|
33 |
-
symbol_y: Symbol
|
34 |
-
equation: str
|
35 |
-
value: str
|
36 |
-
interpret: pd.DataFrame
|
37 |
-
|
38 |
-
|
39 |
-
demand_info = ApplicationInfo(
|
40 |
-
"Elasticity of demand",
|
41 |
-
r"""
|
42 |
-
The elasticity of demand (\(\varepsilon_d\)) measures how sensitive the
|
43 |
-
quantity \(Q_d\) demanded of a good change in its price \(P\).
|
44 |
-
""",
|
45 |
-
symbols("varepsilon_d"),
|
46 |
-
symbols("P", positive=True),
|
47 |
-
symbols("Q_d", positive=True),
|
48 |
-
"Q_d = 100 - P",
|
49 |
-
"Q_d = 90",
|
50 |
-
pd.DataFrame(
|
51 |
-
[[Eq(symbol_epsilon, zoo, evaluate=False), "perfecly elastic", "extremely"],
|
52 |
-
[symbol_epsilon < S(-1), "elastic", "very"],
|
53 |
-
[Eq(symbol_epsilon, -1), "unit elastic", "fairly"],
|
54 |
-
[(S(-1) < symbol_epsilon) & (symbol_epsilon < S(0)), "inelastic", "not very"],
|
55 |
-
[Eq(symbol_epsilon, 0), "perfecly inelastic", "not at all"]],
|
56 |
-
columns=["Elasticity of demand", "How elastic", "Responsiveness"])
|
57 |
-
)
|
58 |
-
|
59 |
-
supply_info = ApplicationInfo(
|
60 |
-
"Elasticity of supply",
|
61 |
-
r"""
|
62 |
-
The elasticity of supply (\(\varepsilon_s\)) measures how sensitive the
|
63 |
-
quantity \(Q_s\) supplied of a good change in its price \(P\).
|
64 |
-
""",
|
65 |
-
symbols("varepsilon_s"),
|
66 |
-
symbols("P", positive=True),
|
67 |
-
symbols("Q_s", positive=True),
|
68 |
-
"Q_s = P - 5",
|
69 |
-
"P = 10",
|
70 |
-
pd.DataFrame(
|
71 |
-
[[Eq(symbol_epsilon, 0), "perfecly inelastic", "not at all"],
|
72 |
-
[(S(0) < symbol_epsilon) & (symbol_epsilon < S(1)), "inelastic", "not very"],
|
73 |
-
[Eq(symbol_epsilon, 1), "unit elastic", "fairly"],
|
74 |
-
[symbol_epsilon > S(1), "elastic", "very"],
|
75 |
-
[Eq(symbol_epsilon, zoo, evaluate=False), "perfecly elastic", "extremely"]],
|
76 |
-
columns=["Elasticity of supply", "How elastic", "Responsiveness"])
|
77 |
-
)
|
78 |
-
|
79 |
-
cross_price_info = ApplicationInfo(
|
80 |
-
"Cross-price elasticity",
|
81 |
-
r"""
|
82 |
-
The cross-price elasticity examines the relationship between the quantity
|
83 |
-
demanded of one good and the price of another related good. Specifically, it
|
84 |
-
measures how sensitive the quantity \(Q_A\) demanded of a good A changes in
|
85 |
-
the price \(P_B\) of another good B.
|
86 |
-
""",
|
87 |
-
symbols("varepsilon_AB"),
|
88 |
-
symbols("P_B", positive=True),
|
89 |
-
symbols("Q_A", positive=True),
|
90 |
-
"Q_A = 10",
|
91 |
-
"P_B = 1",
|
92 |
-
pd.DataFrame(
|
93 |
-
[[symbol_epsilon < S(0), "complements", "bacon and eggs"],
|
94 |
-
[Eq(symbol_epsilon, 0), "independent", "ice cream and chainsaws"],
|
95 |
-
[symbol_epsilon > S(0), "substitutes", "tea and coffee"]],
|
96 |
-
columns=["Cross-price elasticity", "Relationship", "Example"])
|
97 |
-
)
|
98 |
-
|
99 |
-
income_info = ApplicationInfo(
|
100 |
-
"Income elasticity",
|
101 |
-
r"""
|
102 |
-
Income elasticity \(\eta\) measures how sensitive the quantity demanded of a
|
103 |
-
good \(Q\) changes in income \(Y\).
|
104 |
-
""",
|
105 |
-
symbols("eta"),
|
106 |
-
symbols("Y", positive=True),
|
107 |
-
symbols("Q", positive=True),
|
108 |
-
"Q = Y",
|
109 |
-
"Y = 10",
|
110 |
-
pd.DataFrame(
|
111 |
-
[[symbol_epsilon < S(0), "inferior", "instant noodles and frozen food"],
|
112 |
-
[Eq(symbol_epsilon, 0), "neutral", ""],
|
113 |
-
[(S(0) < symbol_epsilon) & (symbol_epsilon <= S(1)),
|
114 |
-
"normal",
|
115 |
-
"food and clothes in general"],
|
116 |
-
[symbol_epsilon > S(1), "luxury", "jewelry and high-end watches"]],
|
117 |
-
columns=["Income elasticity", "Type of good", "Example"])
|
118 |
-
)
|
119 |
-
|
120 |
-
|
121 |
-
@module.ui
|
122 |
-
def application_ui(I: ApplicationInfo):
|
123 |
-
return ui.nav(
|
124 |
-
I.name,
|
125 |
-
ui.p(I.definition),
|
126 |
-
ui.row(
|
127 |
-
ui.column(
|
128 |
-
6,
|
129 |
-
ui.input_text("equation",
|
130 |
-
fr"""Enter an equation for \({latex(I.symbol_x)}\)
|
131 |
-
and \({latex(I.symbol_y)}\):""",
|
132 |
-
I.equation),
|
133 |
-
),
|
134 |
-
ui.column(6, ui.output_text("equation"))
|
135 |
-
),
|
136 |
-
ui.output_text("elasticity"),
|
137 |
-
ui.row(
|
138 |
-
ui.column(
|
139 |
-
6,
|
140 |
-
ui.input_text("point",
|
141 |
-
fr"""Enter a value for \({latex(I.symbol_x)}\)
|
142 |
-
or \({latex(I.symbol_y)}\):""",
|
143 |
-
I.value),
|
144 |
-
),
|
145 |
-
ui.column(6, ui.output_text("point"))
|
146 |
-
),
|
147 |
-
ui.output_text("point_elasticity"),
|
148 |
-
ui.output_table("interpret"),
|
149 |
-
ui.output_plot("curve")
|
150 |
-
)
|
151 |
-
|
152 |
-
|
153 |
-
@module.ui
|
154 |
-
def elasticity_ui():
|
155 |
-
return ui.nav(
|
156 |
-
"Elasticity",
|
157 |
-
ui.h1("Elasticity"),
|
158 |
-
ui.p("""We are interested in measuring how a change in one variable
|
159 |
-
affects another. One issue with measuring quantitative changes is
|
160 |
-
that different markets use different units of measurement. A way we
|
161 |
-
deal with this is to look at proportional changes."""),
|
162 |
-
ui.h2("Measuring elasticity"),
|
163 |
-
ui.p(r"""Elasticity \(\varepsilon\) measures how responsive one variable
|
164 |
-
\(y\) changes in another variable \(x\); We can calculate it by
|
165 |
-
deciding the percentage change in \(y\) by the percentage change in
|
166 |
-
\(x\):"""),
|
167 |
-
ui.p(r"$$\varepsilon=\frac{\%\Delta y}{\%\Delta x}$$"),
|
168 |
-
ui.p(r"""where \(\%\Delta x=\frac{\Delta x}{x}\). The larger the
|
169 |
-
absolute value of \(\varepsilon\), the more responsive \(y\) is to
|
170 |
-
changes in \(x\); Conversely, the smaller the absolute value of
|
171 |
-
\(\varepsilon\), the less responsive \(y\) is to changes in
|
172 |
-
\(x\)."""),
|
173 |
-
ui.h3("Point method"),
|
174 |
-
ui.p(r"""If we are interested in elasticity at a particular point and
|
175 |
-
\(y(x)\) is differentiable at that point, we can use the point
|
176 |
-
method."""),
|
177 |
-
ui.p(r"$$\varepsilon = \frac{\Delta y / {y}}{\Delta x / {x}}"
|
178 |
-
r"= \frac{\Delta y}{\Delta x} \cdot \frac x y"
|
179 |
-
r"= \frac{dy}{dx} \cdot \frac x y$$"),
|
180 |
-
ui.h3("Midpoint (or arc) method"),
|
181 |
-
ui.p("""If we are interested in elasticity when moving from one point to
|
182 |
-
another, we use the midpoint method."""),
|
183 |
-
ui.p(r"$$\varepsilon = \frac{\Delta y / {y^m}}{\Delta x / {x^m}}"
|
184 |
-
r"= \frac{\Delta y}{\Delta x} \cdot \frac{x^m}{y^m}$$"),
|
185 |
-
ui.p(r"where \(x^m=\frac{x_1+x_2}2\) and \(y^m=\frac{y_1+y_2}2\)."),
|
186 |
-
ui.h2("Applications"),
|
187 |
-
ui.navset_pill(
|
188 |
-
application_ui("demand", demand_info),
|
189 |
-
application_ui("supply", supply_info),
|
190 |
-
application_ui("cross_price", cross_price_info),
|
191 |
-
application_ui("income", income_info)
|
192 |
-
),
|
193 |
-
value="elasticity"
|
194 |
-
)
|
195 |
-
|
196 |
-
|
197 |
-
@module.server
|
198 |
-
def application_server(input, output, session, I: ApplicationInfo, settings):
|
199 |
-
|
200 |
-
@reactive.Calc
|
201 |
-
def y():
|
202 |
-
relation = parse_expr_safer(
|
203 |
-
input.equation(),
|
204 |
-
{I.symbol_x.name: I.symbol_x, I.symbol_y.name: I.symbol_y},
|
205 |
-
transformations="all")
|
206 |
-
solutions = solve(relation, I.symbol_y, dict=True)
|
207 |
-
req(len(solutions) == 1)
|
208 |
-
return solutions[0][I.symbol_y]
|
209 |
-
|
210 |
-
@reactive.Calc
|
211 |
-
def epsilon():
|
212 |
-
return diff(y(), I.symbol_x) * S(I.symbol_x) / I.symbol_y
|
213 |
-
|
214 |
-
@reactive.Calc
|
215 |
-
def epsilon_x():
|
216 |
-
return simplify(epsilon().subs({I.symbol_y: y()}))
|
217 |
-
|
218 |
-
@reactive.Calc
|
219 |
-
def point_xy():
|
220 |
-
try:
|
221 |
-
eq = parse_expr_safer(
|
222 |
-
input.point(),
|
223 |
-
{I.symbol_x.name: I.symbol_x, I.symbol_y.name: I.symbol_y},
|
224 |
-
transformations="all")
|
225 |
-
solutions = solve(
|
226 |
-
[eq, Eq(I.symbol_y, y())], (I.symbol_x, I.symbol_y), dict=True)
|
227 |
-
req(len(solutions) == 1)
|
228 |
-
return solutions[0]
|
229 |
-
except SyntaxError:
|
230 |
-
req(False, cancel_output=True)
|
231 |
-
assert False
|
232 |
-
|
233 |
-
@reactive.Calc
|
234 |
-
def point_x():
|
235 |
-
req(point_xy())
|
236 |
-
return point_xy()[I.symbol_x]
|
237 |
-
|
238 |
-
@reactive.Calc
|
239 |
-
def point_y():
|
240 |
-
req(point_xy())
|
241 |
-
return point_xy()[I.symbol_y]
|
242 |
-
|
243 |
-
@reactive.Calc
|
244 |
-
def point_epsilon():
|
245 |
-
return simplify(
|
246 |
-
epsilon().subs({I.symbol_x: point_x(), I.symbol_y: point_y()}))
|
247 |
-
|
248 |
-
@render.text
|
249 |
-
def equation():
|
250 |
-
return (
|
251 |
-
"$$"
|
252 |
-
+ latex(I.symbol_y) + "="
|
253 |
-
+ latex_approx(y(), settings.perc(), settings.approx())
|
254 |
-
+ "$$")
|
255 |
-
|
256 |
-
@render.text
|
257 |
-
def elasticity():
|
258 |
-
return (
|
259 |
-
"Using the point method to elasticity,"
|
260 |
-
+ "$$"
|
261 |
-
+ latex(I.symbol_epsilon)
|
262 |
-
+ r"= \frac{d" + latex(I.symbol_y) + "}{d" + latex(I.symbol_x) + "}"
|
263 |
-
+ r"\cdot"
|
264 |
-
+ r"\frac{" + latex(I.symbol_x) + "}{" + latex(I.symbol_y) + "}"
|
265 |
-
+ "=" + latex(epsilon())
|
266 |
-
+ ("=" + latex(epsilon_x()) if epsilon_x() != epsilon() else "")
|
267 |
-
+ "$$")
|
268 |
-
|
269 |
-
@render.text
|
270 |
-
def point():
|
271 |
-
return (
|
272 |
-
r"$$\begin{align*}"
|
273 |
-
+ latex(I.symbol_x) + "&="
|
274 |
-
+ latex_approx(point_x(), settings.perc(), settings.approx())
|
275 |
-
+ r"\\"
|
276 |
-
+ latex(I.symbol_y) + "&="
|
277 |
-
+ latex_approx(point_y(), settings.perc(), settings.approx())
|
278 |
-
+ r"\end{align*}$$")
|
279 |
-
|
280 |
-
@render.text
|
281 |
-
def point_elasticity():
|
282 |
-
return (
|
283 |
-
r"Substituting the point into \(" + latex(I.symbol_epsilon) + r"\),"
|
284 |
-
+ "$$"
|
285 |
-
+ latex(I.symbol_epsilon) + "="
|
286 |
-
+ latex_approx(point_epsilon(), settings.perc(), settings.approx())
|
287 |
-
+ "$$")
|
288 |
-
|
289 |
-
@render.table
|
290 |
-
def interpret():
|
291 |
-
def highlight_true(row):
|
292 |
-
if point_x() is not None:
|
293 |
-
try:
|
294 |
-
if row.iloc[0].subs({symbol_epsilon: point_epsilon()}):
|
295 |
-
return ["background-color: lightgreen"] * len(row)
|
296 |
-
except TypeError:
|
297 |
-
pass
|
298 |
-
return [None] * len(row)
|
299 |
-
|
300 |
-
def format_cell(cell):
|
301 |
-
if isinstance(cell, str):
|
302 |
-
return cell
|
303 |
-
with evaluate(False):
|
304 |
-
cell = latex(cell.subs({symbol_epsilon: I.symbol_epsilon}))
|
305 |
-
return r"\(" + cell + r"\)"
|
306 |
-
|
307 |
-
return (I.interpret.style
|
308 |
-
.set_table_attributes('class="dataframe table shiny-table w-auto"')
|
309 |
-
.apply(highlight_true, axis=1)
|
310 |
-
.format(format_cell)
|
311 |
-
.hide(axis="index"))
|
312 |
-
|
313 |
-
@render.plot(height=400)
|
314 |
-
def curve():
|
315 |
-
nb = 50
|
316 |
-
ax = plt.subplot()
|
317 |
-
xx, yy = plot(
|
318 |
-
y(), (I.symbol_x, 0, 100),
|
319 |
-
show=False, adaptive=False, nb_of_points=nb)[0].get_points()
|
320 |
-
_, cc = plot(
|
321 |
-
epsilon_x(), (I.symbol_x, 0, 100),
|
322 |
-
show=False, adaptive=False, nb_of_points=nb)[0].get_points()
|
323 |
-
sc = plt.scatter(np.resize(yy, nb), xx, c=np.resize(cc, nb),
|
324 |
-
norm=mpl.colors.AsinhNorm())
|
325 |
-
ax.set_xlim(0)
|
326 |
-
ax.set_ylim(0)
|
327 |
-
ax.set_xlabel(f"${latex(I.symbol_y)}$")
|
328 |
-
ax.set_ylabel(f"${latex(I.symbol_x)}$")
|
329 |
-
ax.get_figure().colorbar(sc, label=f"${latex(I.symbol_epsilon)}$")
|
330 |
-
return ax
|
331 |
-
|
332 |
-
|
333 |
-
@module.server
|
334 |
-
def elasticity_server(input, output, session, settings):
|
335 |
-
application_server("demand", demand_info, settings)
|
336 |
-
application_server("supply", supply_info, settings)
|
337 |
-
application_server("cross_price", cross_price_info, settings)
|
338 |
-
application_server("income", income_info, settings)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|