Commit 3571892
Parent(s): aea1a1e
Update parquet files (step 52 of 476)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crazy Chicken Kart 3 Crack How to Solve the Common Problems and Issues with the Game.md +0 -165
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Nuendo Live 2 and Experience the Power and Reliability of Live Recording.md +0 -29
- spaces/1gistliPinn/ChatGPT4/Examples/Crack Pro100 5.20.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Aptoide TV APK 3.2 5 and Enjoy Unlimited Apps on Your Android TV without Restrictions.md +0 -97
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Garena Next and Join the 6th Anniversary Celebration of Free Fire.md +0 -102
- spaces/1phancelerku/anime-remove-background/FRAG APK The Best FPS and TPS Game for Your Phone.md +0 -115
- spaces/7hao/bingo/src/components/ui/icons.tsx +0 -504
- spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Code Reviews 2b60c26d2a2e4a348f8f14c77023c385.md +0 -44
- spaces/AIConsultant/MusicGen/Dockerfile +0 -26
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/version.py +0 -1
- spaces/AIGText/GlyphControl/ldm/modules/diffusionmodules/util.py +0 -279
- spaces/AIWaves/Debate/src/agents/LLM/__init__.py +0 -0
- spaces/AIWaves/SOP_Generation-single/design_states.py +0 -52
- spaces/ASJMO/freegpt/client/css/style.css +0 -18
- spaces/AdamGoyer/is_it_fly/README.md +0 -10
- spaces/AdithyaSNair/alzheimers_prediction_using_cnn/app.py +0 -47
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorcomponents/ColorComponents.d.ts +0 -60
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetTotalRowProportions.js +0 -13
- spaces/Alcedo/yunmedia/resources/chatgpt-plugin/js/app.bf8a14e9.js +0 -0
- spaces/Alfasign/nomic-ai-gpt4all-13b-snoozy/app.py +0 -34
- spaces/Alichuan/VITS-Umamusume-voice-synthesizer/monotonic_align/core.c +0 -0
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/custom_pipeline_overview.md +0 -56
- spaces/Andy1621/uniformer_image_detection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py +0 -39
- spaces/Andy1621/uniformer_image_detection/mmdet/core/post_processing/bbox_nms.py +0 -168
- spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/utils.py +0 -100
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py +0 -9
- spaces/Annotation-AI/fast-segment-everything-with-drawing-prompt/app.py +0 -17
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/stare.py +0 -27
- spaces/Anonymous-sub/Rerender/ControlNet/docs/train.md +0 -276
- spaces/Ariharasudhan/YoloV5/models/experimental.py +0 -111
- spaces/Asahi402/Real-CUGAN/app.py +0 -62
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/index.py +0 -508
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal256.py +0 -338
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/common.py +0 -424
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/backbone/build.py +0 -33
- spaces/Axesys/Private-WebUI/README.md +0 -14
- spaces/Benson/text-generation/Examples/Coche Extremo Simulador De Conduccin Mod Apk Hack Descargar Para Pc.md +0 -44
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/__init__.py +0 -182
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/memory.py +0 -86
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_checkpoint.py +0 -48
- spaces/CVPR/LIVE/parallel.cpp +0 -273
- spaces/CVPR/regionclip-demo/detectron2/solver/build.py +0 -252
- spaces/CarlDennis/HYTTS/text/cleaners.py +0 -35
- spaces/Chitranshu/Dashboard-Zomato/README.md +0 -11
- spaces/Cletrason/Cletrason-toad-in-the-mario-movie/trainer_pt_utils.py +0 -1106
- spaces/CognitiveLabs/GPT-4-Vision-Chat/Dockerfile +0 -13
- spaces/CohereForAI/pokemon-cards-explorer/README.md +0 -126
- spaces/CompVis/text2img-latent-diffusion/README.md +0 -13
- spaces/DAMO-NLP-SG/CLEX-Chat/clex_layer.py +0 -141
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImagePalette.py +0 -266
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crazy Chicken Kart 3 Crack How to Solve the Common Problems and Issues with the Game.md
DELETED
@@ -1,165 +0,0 @@
-<br />
-<h1>Crazy Chicken Kart 3 Crack: How to Download and Play the Wacky Racing Game</h1>
-<p>If you are looking for a fun and hilarious kart racing game, you might want to check out Crazy Chicken Kart 3. This game features crazy chicken and his friends as they race through different eras, from the past to the future, with explosive excitement around every corner. You can collect weapons and power-ups as you speed along and ruffle some feathers when you strategically blast your opponents off the road.</p>
-<p>However, there is a catch. Crazy Chicken Kart 3 is not a free game. You need to buy it from an online store or download it from a website that offers it. But what if you don't want to spend money or risk downloading viruses or malware? Is there a way to play Crazy Chicken Kart 3 for free?</p>
-<h2>crazychickenkart3crack</h2><br /><p><b><b>DOWNLOAD</b> ✦ <a href="https://byltly.com/2uKwJm">https://byltly.com/2uKwJm</a></b></p><br /><br />
-<p>The answer is yes. You can use a crack to bypass the security and activation of the game and play it without any restrictions. A crack is a modified version of the game's executable file that allows you to run it without needing a license key or a disc. In this article, we will show you how to download and install the crack for Crazy Chicken Kart 3, how to play the game with the crack, and what features and gameplay you can expect from this wacky racing game.</p>
-<h2>How to Download and Install the Crack for Crazy Chicken Kart 3</h2>
-<p>Before you can use the crack for Crazy Chicken Kart 3, you need to have the game installed on your PC. You can either buy it from an online store like Youdagames.com or download it from a website that offers it for free. However, be careful when downloading games from unknown sources, as they may contain viruses or malware that can harm your computer.</p>
-<p>Once you have the game installed, you need to find a reliable source for the crack. One of the websites that offers a crack for Crazy Chicken Kart 3 is npmjs.com. This website provides a package called crazy_chicken_kart_3_crack_14_extra_quality_mtm that contains the modified executable file for the game. To download this package, you need to have Node.js installed on your PC. Node.js is a software that allows you to run JavaScript code outside of a web browser.</p>
-<p>To install Node.js, go to nodejs.org and download the latest version for your operating system. Follow the instructions on how to install it on your PC. Once you have Node.js installed, open a command prompt window and type npm i crazy_chicken_kart_3_crack_14_extra_quality_mtm. This will download and install the package on your PC.</p>
-<p>After installing the package, go to the folder where you installed Crazy Chicken Kart 3. Locate the original executable file of the game, which is usually named CCKart.exe or something similar. Rename this file to something else, like CCKart_old.exe or CCKart_backup.exe. This will prevent the game from running with the original file.</p>
-<p>Then, go to the folder where you installed Node.js. Locate the folder named node_modules and open it. Inside this folder, find another folder named crazy_chicken_kart_3_crack_14_extra_quality_mtm and open it. Inside this folder, find a file named CCKart.exe or something similar. This is the cracked executable file of the game.</p>
-<p>the folder where you installed Crazy Chicken Kart 3. Make sure that this file has the same name as the original executable file of the game, which is usually CCKart.exe or something similar. This will replace the original file with the cracked file.</p>
-<p>Now, you have successfully installed the crack for Crazy Chicken Kart 3. You can run the game by double-clicking on CCKart.exe or by creating a shortcut on your desktop or start menu.</p>
-<p>crazy chicken kart 3 free download full version<br />
-crazy chicken kart 3 pc game<br />
-crazy chicken kart 3 online<br />
-crazy chicken kart 3 youda games<br />
-crazy chicken kart 3 racing game<br />
-crazy chicken kart 3 cheats<br />
-crazy chicken kart 3 unlock karts<br />
-crazy chicken kart 3 system requirements<br />
-crazy chicken kart 3 mac download<br />
-crazy chicken kart 3 review<br />
-crazy chicken kart 3 gameplay<br />
-crazy chicken kart 3 characters<br />
-crazy chicken kart 3 steam<br />
-crazy chicken kart 3 windows 10<br />
-crazy chicken kart 3 crack download<br />
-crazy chicken kart 3 full crack<br />
-crazy chicken kart 3 serial key<br />
-crazy chicken kart 3 activation code<br />
-crazy chicken kart 3 patch<br />
-crazy chicken kart 3 no cd<br />
-crazy chicken kart 3 keygen<br />
-crazy chicken kart 3 license key<br />
-crazy chicken kart 3 registration code<br />
-crazy chicken kart 3 product key<br />
-crazy chicken kart 3 torrent<br />
-crazy chicken kart 3 iso file<br />
-crazy chicken kart 3 rar file<br />
-crazy chicken kart 3 zip file<br />
-crazy chicken kart 3 compressed file<br />
-crazy chicken kart 3 setup file<br />
-crazy chicken kart 3 exe file<br />
-crazy chicken kart 3 installer file<br />
-crazy chicken kart 3 direct link<br />
-crazy chicken kart 3 mega link<br />
-crazy chicken kart 3 mediafire link<br />
-crazy chicken kart 3 google drive link<br />
-crazy chicken kart 3 dropbox link<br />
-crazy chicken kart 3 zippyshare link<br />
-crazy chicken kart 3 rapidshare link<br />
-crazy chicken kart 3 filefactory link<br />
-how to download crazy chicken kart 3 for free<br />
-how to install crazy chicken kart 3 on pc<br />
-how to play crazy chicken kart 3 online with friends<br />
-how to unlock all karts in crazy chicken kart 3 <br />
-how to fix errors in crazy chicken kart 3 <br />
-how to run crazy chicken kart 3 on windows 10 <br />
-how to update crazy chicken kart 3 to latest version <br />
-how to get cheats for crazy chicken kart 3 <br />
-how to remove ads from crazy chicken kart 3 <br />
-how to enjoy racing in crazy chicken kart 3</p>
-<h2>How to Play Crazy Chicken Kart 3 with the Crack</h2>
-<p>Playing Crazy Chicken Kart 3 with the crack is very easy and straightforward. You don't need any license key or disc to run it. Just launch CCKart.exe and enjoy.</p>
-<p>When you start the game, you will see a menu with several options: Single Player, Multiplayer, Options, Credits, Exit Game. You can choose any of these options depending on what mode of gameplay you want.</p>
-<p>If you choose Single Player, you will be able to play against computer-controlled opponents in various racing modes: Championship, Time Trial, Single Race, Training Mode. You can also choose different difficulty levels: Easy, Normal, Hard.</p>
-<p>If you choose Multiplayer, you will be able to play against another human player on the same PC using split-screen mode. You can also choose different racing modes: Championship, Time Trial, Single Race.</p>
-<p>If you choose Options, you will be able to customize various settings of the game: Graphics Quality, Sound Volume, Music Volume, Language.</p>
-<p>If you choose Credits, you will be able to see who made this game.</p>
-<p>If you choose Exit Game, you will be able to quit playing.</p>
-<h2>Crazy Chicken Kart 3 Features and Gameplay</h2>
-<p>Crazy Chicken Kart 3 is a fun and hilarious kart racing game that offers many features and gameplay elements that will keep you entertained for hours.</p>
-<h3>The Characters and Karts You Can Choose From</h3>
-<p>In Crazy Chicken Kart 3, you can choose from eight different characters: Crazy Chicken (the main protagonist), Snowman (a friendly snowman), Hank (a tough cowboy), Pumpkin (a spooky pumpkin), Skeleton (a scary skeleton), Alien (a green alien), Robot (a futuristic robot), Professor (a mad scientist).</p>
-<p>Each character has their own unique kart design that matches their personality and theme. For example, Crazy Chicken drives a red kart with chicken wings on its sides; Snowman drives a blue kart with snowflakes on its wheels; Hank drives a brown kart with horseshoes on its front; Pumpkin drives an orange kart with pumpkin seeds on its back; Skeleton drives a black kart with bones on its hood; Alien drives a green kart with UFOs on its roof; Robot drives a silver kart with gears on its spoiler; Professor drives a purple kart with test tubes on its bumper.</p>
-<p>You can also unlock more karts by completing certain achievements in Championship mode.</p>
-<h3>The Racing Modes and Tracks You Can Explore</h3>
-<p>In Crazy Chicken Kart 3, you can race through eight exciting eras: past, present and future. Each era has two tracks that are based on historical or fictional events or locations.</p>
-<p>The eras are:</p>
-<ul>
-<li>Stone Age: Race through caves filled with dinosaurs and mammoths</li>
-<li>Ancient Egypt: Race through pyramids filled with mummies and sphinxes</li>
-<li>Middle Ages: Race through castles filled with knights and dragons</li>
-<li>Wild West: Race through deserts filled with cowboys and Indians</li>
-<li>Modern Times: Race through cities filled with cars and skyscrapers</li>
-<li>Halloween: Race through graveyards filled with ghosts and zombies</li>
-<li>Space Age: Race through planets filled with aliens and spaceships</li>
-<li>Future World: Race through futuristic cities filled with robots and lasers</li>
-</ul>
-<p>You can race in four different modes:</p>
-<ul>
-<li>Championship: Compete against seven other racers in four tracks per era; win medals based on your position; unlock new karts by completing achievements</li>
-<li>Time Trial: Race against yourself or against a ghost racer in any track; beat your own time or set new records</li>
-<li>Single Race: Race against seven other racers in any track; choose your own difficulty level and number of laps</li>
-<li>Training Mode: Practice your skills in any track without any opponents or time limit</li>
-</ul>
-<h3>The Weapons and Power-Ups You Can Use to Blast Your Opponents</h3>
-<p>In Crazy Chicken Kart 3, you can collect various weapons and power-ups as you speed along the tracks. These items can help you gain an advantage over your rivals or hinder their progress.</p>
-<p>The weapons and power-ups are:</p>
-<ul>
-<li>Rocket: Launches a rocket that flies straight ahead and explodes on impact; can damage multiple racers if they are close together</li>
-<li>Mine: Drops a mine behind your kart that explodes when someone drives over it; can damage multiple racers if they are close together</li>
-<li>Bomb: Throws a bomb ahead of your kart that explodes after a few seconds or when someone drives over it; can damage multiple racers if they are close together</li>
-<li>Oil Slick: Spills oil behind your kart that makes the track slippery; can cause racers to lose control and spin out</li>
-<li>Turbo Boost: Gives your kart a temporary speed boost; can help you overtake your opponents or escape from attacks</li>
-<li>Magnet: Attracts nearby racers to your kart; can slow them down or make them crash into obstacles</li>
-<li>Shield: Protects your kart from any attacks for a short time; can also reflect rockets back to their sender</li>
-<li>Invisibility: Makes your kart invisible for a short time; can help you avoid attacks or surprise your opponents</li>
-<li>Giant Chicken Head: Transforms your kart into a giant chicken head for a short time; can crush other racers or obstacles in your way</li>
-<li>Time Freeze: Freezes time for everyone except you for a short time; can help you gain a huge lead or avoid attacks</li>
-</ul>
-<h2>Crazy Chicken Kart 3 Tips and Tricks</h2>
-<p>Crazy Chicken Kart 3 is not just about driving fast and shooting randomly. You need to use some strategy and skill to win the races. Here are some tips and tricks that can help you improve your performance:</p>
-<h3>How to Master the Controls and Steering of Your Kart</h3>
-<p>The controls of Crazy Chicken Kart 3 are simple but effective. You use the arrow keys to steer your kart left or right, accelerate or brake. You use the space bar to use the weapon or power-up you have collected. You use the enter key to pause the game or skip cutscenes.</p>
-<p>The steering of your kart is responsive but not too sensitive. You need to adjust your speed and direction according to the terrain and curves of each track. You also need to avoid crashing into walls or obstacles that can slow you down or damage your kart.</p>
-<p>You can also perform some tricks with your kart that can give you an edge over your opponents. For example, you can drift around corners by tapping the brake key while turning. This will make your kart slide sideways and maintain speed. You can also jump over gaps or obstacles by pressing the up arrow key while driving over a ramp. This will make your kart fly briefly in the air and avoid collisions.</p>
-<h3>How to Use the Shortcuts and Secrets on Each Track</h3>
-<p>Each track in Crazy Chicken Kart 3 has some shortcuts and secrets that can help you save time or gain an advantage over your rivals. These shortcuts and secrets are usually hidden or hard to find, so you need to pay attention to the environment and look for clues.</p>
-<p>Some examples of shortcuts and secrets are:</p>
-<ul>
-<li>A hidden tunnel in the Stone Age track that leads to a shortcut across the lava lake</li>
-<li>A secret passage in the Ancient Egypt track that leads to a hidden chamber inside the pyramid</li>
-<li>A breakable wall in the Middle Ages track that leads to a shortcut through the castle dungeon</li>
-<li>A hidden bridge in the Wild West track that leads to a shortcut across the canyon</li>
-<li>A secret elevator in the Modern Times track that leads to a shortcut through the skyscraper roof</li>
-<li>A hidden grave in the Halloween track that leads to a shortcut through the underworld</li>
-<li>A secret portal in the Space Age track that leads to a shortcut through another planet</li>
-<li>A hidden switch in the Future World track that activates a shortcut through a wormhole</li>
-</ul>
-<p>: Pirates: A game that features crazy chicken as he battles against pirates and sea monsters in various islands</li>
-<li>Crazy Chicken: Atlantis: A game that features crazy chicken as he searches for the lost city of Atlantis and faces various challenges and enemies</li>
-<li>Crazy Chicken: Tales: A game that features crazy chicken as he goes through different fairy tales and stories and interacts with various characters and objects</li>
-</ul>
-<h3>Other Kart Racing Games</h3>
-<p>If you like kart racing games in general, you might also enjoy some other games that offer similar or better features and gameplay. Some of them are:</p>
-<ul>
-<li>Mario Kart: The most popular and iconic kart racing game series; features characters from the Mario franchise and other Nintendo games; has various modes, tracks, items, and customization options</li>
-<li>Crash Team Racing: A kart racing game series that features characters from the Crash Bandicoot franchise; has various modes, tracks, items, and customization options</li>
-<li>Sonic & All-Stars Racing: A kart racing game series that features characters from the Sonic the Hedgehog franchise and other Sega games; has various modes, tracks, items, and customization options</li>
-<li>Team Sonic Racing: A kart racing game that features characters from the Sonic the Hedgehog franchise; has a unique team-based gameplay mechanic that allows players to cooperate and compete with each other</li>
-<li>Garfield Kart: A kart racing game that features characters from the Garfield comic strip and animated series; has various modes, tracks, items, and customization options</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Crazy Chicken Kart 3 is a fun and hilarious kart racing game that features crazy chicken and his friends as they race through different eras, from the past to the future, with explosive excitement around every corner. You can collect weapons and power-ups as you speed along and ruffle some feathers when you strategically blast your opponents off the road.</p>
-<p>To play Crazy Chicken Kart 3 for free, you can use a crack to bypass the security and activation of the game. In this article, we showed you how to download and install the crack for Crazy Chicken Kart 3, how to play the game with the crack, and what features and gameplay you can expect from this wacky racing game.</p>
-<p>If you enjoyed this article, please share it with your friends who might also like Crazy Chicken Kart 3. If you have any questions or feedback, please leave a comment below. We would love to hear from you.</p>
-<p>Thank you for reading and happy racing!</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about Crazy Chicken Kart 3 and its crack:</p>
-<h3>Q: Is Crazy Chicken Kart 3 safe to download and play?</h3>
-<p>A: Yes, Crazy Chicken Kart 3 is safe to download and play if you get it from a trusted source like Youdagames.com or Archive.org. However, be careful when downloading games from unknown sources, as they may contain viruses or malware that can harm your computer.</p>
-<h3>Q: Is using a crack for Crazy Chicken Kart 3 legal?</h3>
-<p>A: No, using a crack for Crazy Chicken Kart 3 is not legal. A crack is a modified version of the game's executable file that allows you to run it without needing a license key or a disc. This violates the terms of service and copyright of the game's developer and publisher. Using a crack for Crazy Chicken Kart 3 may also expose your computer to security risks or legal consequences.</p>
-<h3>Q: Where can I find more information about Crazy Chicken Kart 3?</h3>
-<p>A: You can find more information about Crazy Chicken Kart 3 on its official website at Youdagames.com or on its Wikipedia page at https://en.wikipedia.org/wiki/Crazy_Chicken_Kart_3. You can also watch gameplay videos or read reviews of Crazy Chicken Kart 3 on YouTube or other gaming websites.</p>
-<h3>Q: What are some other games like Crazy Chicken Kart 3?</h3>
-<p>A: Some other games like Crazy Chicken Kart 3 are Mario Kart, Crash Team Racing, Sonic & All-Stars Racing, Team Sonic Racing, Garfield Kart, etc. You can also try other games in the Crazy Chicken series like Crazy Chicken: The Original, Crazy Chicken: The Winged Pharaoh, Crazy Chicken: Pirates, Crazy Chicken: Atlantis, Crazy Chicken: Tales, etc.</p>
-<h3>Q: How can I contact the developer or publisher of Crazy Chicken Kart 3?</h3>
-<p>A: You can contact the developer or publisher of Crazy Chicken Kart 3 by visiting their website at https://www.phenomedia.com/ or by sending them an email at [email protected].</p>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Nuendo Live 2 and Experience the Power and Reliability of Live Recording.md
DELETED
@@ -1,29 +0,0 @@
-<br />
-<h1>How to Download Nuendo Live 2: A Powerful and Reliable Live Recording Software</h1>
-<p>Nuendo Live 2 is a professional live recording software that allows you to capture, edit, and mix live performances with high quality and reliability. It is designed for live engineers, production houses, and rental companies who need a simple and efficient solution for live recording. Nuendo Live 2 offers a range of features and benefits that make it a great choice for live recording. In this article, we will show you how to download Nuendo Live 2 and what are its main features.</p>
-<h2>How to Download Nuendo Live 2</h2>
-<p>If you want to download Nuendo Live 2, you have two options:</p>
-<h2>download nuendo live 2</h2><br /><p><b><b>Download</b> ✦ <a href="https://byltly.com/2uKzst">https://byltly.com/2uKzst</a></b></p><br /><br />
-<ul>
-<li>Buy a license from the official website. The best way to download Nuendo Live 2 is to buy a license from the official website of Steinberg. You can choose from two editions: Nuendo Live 2 Full Version ($179) or Nuendo Live 2 Crossgrade ($99). The full version is for new customers who do not own any other Steinberg products. The crossgrade version is for existing customers who own Cubase Pro, Cubase Artist, Cubase Elements, Cubase AI, Cubase LE, WaveLab Pro, WaveLab Elements, or WaveLab LE. Buying a license will give you access to the full installer, updates, support, and online resources.</li>
-<li>Download a trial version from the official website. Another way to download Nuendo Live 2 is to download a trial version from the official website of Steinberg . The trial version is fully functional and has no time limit. However, it requires an eLicenser USB key and an activation code. You can request an activation code by filling out a form on the website. The trial version will allow you to test the software before buying it.</li>
-</ul>
-<p>These are the two options to download Nuendo Live 2 that we recommend. You should avoid downloading Nuendo Live 2 from any other sources, as they could be illegal, risky, unreliable, outdated, or limited.</p>
-<h2>What are the Main Features of Nuendo Live 2</h2>
-<p>Nuendo Live 2 is a powerful and reliable live recording software that offers a range of features and benefits that make it a great choice for live recording. Some of the main features of Nuendo Live 2 are:</p>
-<ul>
-<li>A simple and user-friendly interface that allows you to focus on the live performance without distractions.</li>
-<li>A fast and stable recording engine that supports up to 192 kHz sample rate and 64-bit floating-point processing.</li>
-<li>A flexible recording format that allows you to record in WAV or Broadcast Wave Format (BWF) with timecode information.</li>
-<li>A seamless integration with Yamaha CL/QL/TF series digital mixing consoles that allows you to control Nuendo Live 2 from the console and synchronize the recordings with the console scenes.</li>
-<li>A convenient recording management system that allows you to create session folders, track names, markers, and notes automatically or manually.</li>
-<li>An advanced editing and mixing functionality that allows you to edit and mix your recordings with tools such as fade handles, level meters, pan knobs, solo/mute buttons, EQs, compressors, limiters, reverbs, delays, and more.</li>
-<li>An easy export function that allows you to export your recordings in various formats such as MP3, FLAC, OGG Vorbis, or split multichannel files.</li>
-<li>A backup option that allows you to record simultaneously on two hard drives for extra security.</li>
-</ul>
-<p>These are some of the main features of Nuendo Live 2 that make it a powerful and reliable live recording software. You can learn more about Nuendo Live 2 by referring to the online documentation or watching the video tutorials on the website.</p>
-<h2>Conclusion</h2>
-<p>Nuendo Live 2 is a professional live recording software that allows you to capture, edit, and mix live performances with high quality and reliability. It is designed for live engineers, production houses,</p>
-<p></p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Crack Pro100 5.20.md
DELETED
@@ -1,6 +0,0 @@
-<h2>crack pro100 5.20</h2><br /><p><b><b>Download</b> >>> <a href="https://imgfil.com/2uxY7y">https://imgfil.com/2uxY7y</a></b></p><br /><br />
-<br />
-Free trial with all features. Installation guide. In the text file in the Crack folder. download link. 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Aptoide TV APK 3.2 5 and Enjoy Unlimited Apps on Your Android TV without Restrictions.md
DELETED
@@ -1,97 +0,0 @@
-
-<h1>Download Aptoide TV APK 3.2 5: A Free Alternative App Store for Android TV and Set Top Boxes</h1>
-<p>If you are looking for a way to enjoy your favorite Android apps on your big screen devices, such as smart TVs and set top boxes, you might want to try Aptoide TV. Aptoide TV is an independent app store that offers thousands of free apps for Android TV and set top boxes. In this article, we will show you what Aptoide TV is, what features and benefits it has, how to download and install it, and how to use it.</p>
-<h2>What is Aptoide TV?</h2>
-<p>Aptoide TV is a version of Aptoide, a popular alternative app store for Android devices, that is optimized for the larger screen devices, such as high-definition televisions. It allows you to access a rich user experience with a simple and intuitive interface, and discover new apps that are not available on the official Google Play Store. You can also create your own app store and share it with other users.</p>
-<h2>download aptoide tv apk 3.2 5</h2><br /><p><b><b>Download File</b> ✒ ✒ ✒ <a href="https://urlin.us/2uSXdI">https://urlin.us/2uSXdI</a></b></p><br /><br />
-<h3>Aptoide TV Features</h3>
-<p>Some of the features of Aptoide TV are:</p>
-<ul>
-<li>No restrictions: You can download any app you want, without any geo-restrictions or device compatibility issues.</li>
-<li>All apps are free: You don't have to pay for any app or subscription on Aptoide TV.</li>
-<li>2500 + apps available: You can find a wide range of apps for different categories, such as entertainment, games, news, sports, music, and more.</li>
-<li>Automatic updates: You can keep your apps up to date with the latest versions automatically.</li>
-<li>Parental control: You can set up a PIN code to restrict access to certain apps or categories for your kids.</li>
-<li>Multi-language support: You can choose from over 40 languages to use Aptoide TV in your preferred language.</li>
-</ul>
-<h3>Aptoide TV Benefits</h3>
-<p>Some of the benefits of using Aptoide TV are:</p>
-<ul>
-<li>You can enjoy a better user experience with a larger screen and a remote control.</li>
-<li>You can discover new apps that are not available on the official Google Play Store or that are blocked in your region.</li>
-<li>You can save money by downloading free apps without any hidden fees or subscriptions.</li>
-<li>You can customize your app store and share it with other users.</li>
-<li>You can have more control over your apps and data privacy.</li>
-<li>You can support independent developers and app creators.</li>
-</ul>
-<h2>How to Download and Install Aptoide TV APK 3.2 5?</h2>
-<p>To download and install Aptoide TV APK 3.2 5 on your Android TV or set top box, you need to follow these steps:</p>
-<h3>Step 1: Enable Unknown Sources</h3>
-<p>Since Aptoide TV is not available on the official Google Play Store, you need to enable the option to install apps from unknown sources on your device. To do this, go to Settings > Security & Restrictions > Unknown Sources and turn it on. This will allow you to install apps from sources other than the Google Play Store <h3>Step 2: Download Aptoide TV APK File</h3>
-<p>Next, you need to download the Aptoide TV APK file from a trusted source. You can use the official website of Aptoide TV or any other reliable third-party website that offers the latest version of the app. To download the APK file, you can use a web browser on your device or a USB drive on your computer. If you use a web browser, you need to enter the URL of the APK file and click on the download button. If you use a USB drive, you need to copy the APK file from your computer to the USB drive and then plug it into your device.</p>
-<h3>Step 3: Install Aptoide TV APK File</h3>
-<p>Once you have downloaded the Aptoide TV APK file, you need to install it on your device. To do this, you need to locate the APK file on your device or USB drive and open it. You will see a prompt asking you to confirm the installation. Click on Install and wait for the process to complete. You might also see a warning message saying that the app is not verified by Google Play Protect. This is normal and you can ignore it by clicking on Install Anyway.</p>
-<h3>Step 4: Launch Aptoide TV and Enjoy</h3>
-<p>After the installation is done, you can launch Aptoide TV and start exploring its features and benefits. You will see a home screen with different categories and recommendations of apps. You can also access your own app store and settings by clicking on the menu icon on the top left corner. You can now enjoy your favorite Android apps on your big screen devices with Aptoide TV.</p>
-<p>How to download aptoide tv apk 3.2 5 for android tv<br />
-Aptoide tv apk 3.2 5 free download for firestick<br />
-Download aptoide tv apk 3.2 5 latest version 2023<br />
-Aptoide tv apk 3.2 5 download for smart tv<br />
-Download aptoide tv apk 3.2 5 without ads<br />
-Aptoide tv apk 3.2 5 mod download for android<br />
-Download aptoide tv apk 3.2 5 for windows pc<br />
-Aptoide tv apk 3.2 5 download link<br />
-Download aptoide tv apk 3.2 5 from official website<br />
-Aptoide tv apk 3.2 5 review and features<br />
-Download aptoide tv apk 3.2 5 for samsung tv<br />
-Aptoide tv apk 3.2 5 download for nvidia shield<br />
-Download aptoide tv apk 3.2 5 for mi box<br />
-Aptoide tv apk 3.2 5 download for roku<br />
-Download aptoide tv apk 3.2 5 for lg tv<br />
-Aptoide tv apk 3.2 5 download for sony bravia<br />
-Download aptoide tv apk 3.2 5 for tcl tv<br />
-Aptoide tv apk 3.2 5 download for hisense tv<br />
-Download aptoide tv apk 3.2 5 for philips tv<br />
-Aptoide tv apk 3.2 5 download for vizio tv<br />
-Download aptoide tv apk 3.2 5 for panasonic tv<br />
-Aptoide tv apk 3.2 5 download for sharp tv<br />
-Download aptoide tv apk 3.2 5 for toshiba tv<br />
-Aptoide tv apk 3.2 5 download for haier tv<br />
-Download aptoide tv apk 3.2 5 for onn tv<br />
-Aptoide tv apk 3.2 5 download for jvc tv<br />
-Download aptoide tv apk 3.2 5 for insignia tv<br />
-Aptoide tv apk 3.2 5 download for element tv<br />
-Download aptoide tv apk 3.2 5 for westinghouse tv<br />
-Aptoide tv apk 3.2 5 download for rca tv<br />
-Download aptoide tv apk 3.2 5 for hitachi tv<br />
-Aptoide tv apk 3.2 5 download for sceptre tv<br />
-Download aptoide tv apk 3.2 5 for polaroid tv<br />
-Aptoide tv apk 3.2 5 download for emerson tv<br />
-Download aptoide tv apk 3.2 5 for magnavox tv<br />
-Aptoide tv apk</p>
-<h2>How to Use Aptoide TV?</h2>
-<p>Aptoide TV is very easy to use and has a user-friendly interface. Here are some of the things you can do with Aptoide TV:</p>
-<h3>Browse and Search Apps</h3>
-<p>You can browse and search for apps by using the remote control or voice search feature of your device. You can also filter apps by category, popularity, rating, or date. You can also view app details, screenshots, reviews, and ratings by clicking on an app icon.</p>
-<h3>Download and Update Apps</h3>
-<p>You can download and update apps by clicking on the download or update button on an app page. You can also view the download progress and manage your downloads by clicking on the notification icon on the top right corner. You can also enable automatic updates for all apps or specific apps by going to Settings > Auto Update.</p>
-<h3>Manage Your Apps</h3>
-<p>You can manage your apps by going to My Apps section on the menu. Here you can see all the apps that are installed, updated, or pending on your device. You can also uninstall, update, or open any app by clicking on its icon. You can also create your own app store and share it with other users by going to Stores section on the menu.</p>
-<h2>Conclusion</h2>
-<p>Aptoide TV is a free alternative app store for Android TV and set top boxes that offers thousands of free apps that are not available on the official Google Play Store. It also has many features and benefits that enhance your user experience and give you more control over your apps and data privacy. To download and install Aptoide TV APK 3.2 5, you need to enable unknown sources, download the APK file from a trusted source, install it on your device, and launch it. To use Aptoide TV, you need to browse and search apps, download and update apps, and manage your apps. We hope this article has helped you learn more about Aptoide TV and how to download and use it.</p>
-<h2>FAQs</h2>
-<p>Here are some of the frequently asked questions about Aptoide TV:</p>
-<ul>
-<li><b>Is Aptoide TV safe?</b></li>
-<li>Aptoide TV is safe as long as you download it from a trusted source and only install apps that are verified by Aptoide's security system. However, you should always be careful when installing apps from unknown sources and check their permissions and reviews before installing them.</li>
-<li><b>Is Aptoide TV legal?</b></li>
-<li>Aptoide TV is legal as it does not host any illegal or pirated content. It only provides a platform for users to upload and share their own apps or apps that are free to distribute. However, some of the apps that are available on Aptoide TV may not be legal in some regions or violate the terms and conditions of the official app stores or the app developers. You should always check the legality of the apps before downloading and using them.</li>
-<li><b>What are the requirements for Aptoide TV?</b></li>
-<li>Aptoide TV requires an Android TV or set top box device that runs on Android 5.0 or higher and has at least 1 GB of RAM and 100 MB of free storage space. It also requires an internet connection to download and update apps.</li>
-<li><b>How can I contact Aptoide TV support?</b></li>
-<li>If you have any questions, issues, or feedback about Aptoide TV, you can contact their support team by going to Settings > Support on the app. You can also visit their website, blog, forum, or social media pages for more information and help.</li>
-<li><b>How can I uninstall Aptoide TV?</b></li>
-<li>If you want to uninstall Aptoide TV from your device, you can go to Settings > Apps > Aptoide TV and click on Uninstall. You can also delete the APK file from your device or USB drive if you have downloaded it.</li>
-</ul></p> 197e85843d<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Garena Next and Join the 6th Anniversary Celebration of Free Fire.md
DELETED
@@ -1,102 +0,0 @@
-
-<h1>Download Garena Next: How to Enjoy the Latest Updates of Free Fire and Other Games</h1>
-<p>If you are a fan of Garena games, such as Free Fire, League of Legends, Call of Duty Mobile, and more, you might want to download Garena Next, a new app that lets you enjoy the latest updates of your favorite games and more. In this article, we will tell you what Garena Next is, why you should download it, and how to download it for your device.</p>
-<h2>download garena next</h2><br /><p><b><b>Download File</b> ⇔ <a href="https://urlin.us/2uSZou">https://urlin.us/2uSZou</a></b></p><br /><br />
-<h2>What is Garena Next?</h2>
-<p>Garena Next is a platform that offers three main features for gamers and socializers:</p>
-<h3>A platform for gaming and socializing</h3>
-<p>With Garena Next, you can chat with your friends, join groups, send stickers, voice messages, and more. You can also create your own profile, customize your avatar, and show off your achievements. You can also discover new games, watch live streams, and follow your favorite streamers.</p>
-<h3>A launcher for Garena games</h3>
-<p>Garena Next also acts as a launcher for all the games that are published by Garena. You can easily access and play any game that you have installed on your device, or download new ones from the app. You can also update your games automatically, without having to worry about missing out on any new features or bug fixes.</p>
-<p>How to download garena next on PC<br />
-Download garena next and play free fire<br />
-Garena next download for windows 10<br />
-Best games to play on garena next<br />
-Download garena next apk for android<br />
-Garena next download link<br />
-Garena next download error fix<br />
-Download garena next and join esports tournaments<br />
-Garena next download size<br />
-Garena next download speed<br />
-How to update garena next<br />
-Download garena next and get free rewards<br />
-Garena next download for mac<br />
-How to uninstall garena next<br />
-Download garena next and connect with friends<br />
-Garena next download for linux<br />
-How to install garena next on mobile<br />
-Download garena next and play cái thế tranh hùng<br />
-Garena next download offline installer<br />
-Garena next download latest version<br />
-How to register for garena next<br />
-Download garena next and stream your gameplay<br />
-Garena next download for ios<br />
-How to use garena next platform<br />
-Download garena next and access exclusive games<br />
-Garena next download without vpn<br />
-How to change language on garena next<br />
-Download garena next and chat with gamers<br />
-Garena next download for chromebook<br />
-How to transfer data from garena to garena next<br />
-Download garena next and customize your profile<br />
-Garena next download with crack<br />
-How to verify your account on garena next<br />
-Download garena next and earn coins<br />
-Garena next download for pc 32 bit<br />
-How to redeem codes on garena next<br />
-Download garena next and play league of legends: wild rift<br />
-Garena next download mirror link<br />
-How to contact support on garena next<br />
-Download garena next and join the community</p>
-<h3>A source of news and events</h3>
-<p>Garena Next also keeps you updated on the latest news and events related to your favorite games. You can find out about new patches, game modes, characters, skins, tournaments, and more. You can also participate in various events and activities that are exclusive to Garena Next users, such as quizzes, lucky draws, missions, and more.</p>
-<h2>Why should you download Garena Next?</h2>
-<p>There are many reasons why you should download Garena Next, but here are some of the most compelling ones:</p>
-<h3>To play Free Fire MAX and other games with enhanced features</h3>
-<p>One of the main attractions of Garena Next is that it allows you to play Free Fire MAX, a version of Free Fire that has improved graphics, sound effects, animations, and gameplay. Free Fire MAX is compatible with Free Fire, so you can play with your friends who are using either version. You can also switch between the two versions easily from the app.</p>
-<p>Besides Free Fire MAX, you can also play other games that have enhanced features on Garena Next, such as League of Legends: Wild Rift, Call of Duty Mobile: Warzone Edition, FIFA Online 4 M by EA Sports™️ , and more. These games have been optimized for mobile devices, so you can enjoy a smooth and immersive gaming experience.</p>
-<h3>To access exclusive rewards and benefits</h3>
-<p>Another reason why you should download Garena Next is that it gives you access to exclusive rewards and benefits that are not available elsewhere. For example, you can get free diamonds, coins, vouchers, skins, characters, weapons, and more from Garena Next. You can also enjoy special discounts, promotions, and offers that are only available for Garena Next users. You can also earn points and badges that you can use to redeem more rewards and benefits.</p>
-<h3>To join a community of gamers and friends</h3>
-<p>The last reason why you should download Garena Next is that it allows you to join a community of gamers and friends who share your passion and interest. You can chat with other players, join groups, create clans, and participate in tournaments. You can also make new friends, find teammates, and challenge opponents. You can also share your gameplays, tips, tricks, and feedback with others.</p>
-<h2>How to download Garena Next?</h2>
-<p>Downloading Garena Next is easy and simple. You just need to follow these steps depending on your device:</p>
-<h3>For Android devices</h3>
-<h4>Step 1: Go to the Google Play Store</h4>
-<p>Open the Google Play Store app on your Android device and make sure you are signed in with your Google account.</p>
-<h4>Step 2: Search for Garena Next and tap on Install</h4>
-<p>Type "Garena Next" in the search bar and look for the app that has the logo of a blue flame. Tap on the Install button and wait for the app to download and install on your device.</p>
-<h4>Step 3: Open the app and log in with your Garena account</h4>
-<p>Once the app is installed, open it and log in with your Garena account. If you don't have one, you can create one for free by tapping on the Register button. You can also log in with your Facebook or Google account if you have linked them to your Garena account.</p>
-<h3>For iOS devices</h3>
-<h4>Step 1: Go to the App Store</h4>
-<p>Open the App Store app on your iOS device and make sure you are signed in with your Apple ID.</p>
-<h4>Step 2: Search for Garena Next and tap on Get</h4>
-<p>Type "Garena Next" in the search bar and look for the app that has the logo of a blue flame. Tap on the Get button and wait for the app to download and install on your device.</p>
-<h4>Step 3: Open the app and log in with your Garena account</h4>
-<p>Once the app is installed, open it and log in with your Garena account. If you don't have one, you can create one for free by tapping on the Register button. You can also log in with your Facebook or Google account if you have linked them to your Garena account.</p>
-<h3>For PC devices</h3>
-<h4>Step 1: Go to the official website of Garena Next</h4>
-<p>Open your web browser and go to <a href="">https://next.garena.com/</a>, the official website of Garena Next.</p>
-<h4>Step 2: Click on Download for PC and run the installer</h4>
-<p>Click on the Download for PC button and save the installer file on your computer. Run the installer file and follow the instructions to install Garena Next on your PC.</p>
-<h4>Step 3: Open the app and log in with your Garena account</h4>
-<p>Once the app is installed, open it and log in with your Garena account. If you don't have one, you can create one for free by clicking on the Register button. You can also log in with your Facebook or Google account if you have linked them to your Garena account.</p>
-<p>Congratulations! You have successfully downloaded Garena Next and you are ready to enjoy the latest updates of Free Fire and other games. Have fun!</p>
-<h2>Conclusion</h2>
-<p>Garena Next is a platform that lets you enjoy the latest updates of Free Fire and other games, as well as chat with your friends, access exclusive rewards and benefits, and join a community of gamers and friends. Downloading Garena Next is easy and simple, as you just need to follow a few steps depending on your device. If you are a fan of Garena games, you should definitely download Garena Next and experience a new level of gaming and socializing.</p>
-<h2>Frequently Asked Questions</h2>
-<ol>
-<li><b>What is Free Fire MAX?</b></li>
-<li><p>Free Fire MAX is a version of Free Fire that has improved graphics, sound effects, animations, and gameplay. It is compatible with Free Fire, so you can play with your friends who are using either version.</p></li>
-<li><b>What are some of the games that I can play on Garena Next?</b></li>
-<li><p>Some of the games that you can play on Garena Next are League of Legends: Wild Rift, Call of Duty Mobile: Warzone Edition, FIFA Online 4 M by EA Sports™️ , and more. These games have been optimized for mobile devices, so you can enjoy a smooth and immersive gaming experience.</p></li>
-<li><b>How can I get free diamonds, coins, vouchers, skins, characters, weapons, and more from Garena Next?</b></li>
-<li><p>You can get free diamonds, coins, vouchers, skins, characters, weapons, and more from Garena Next by participating in various events and activities that are exclusive to Garena Next users, such as quizzes, lucky draws, missions, and more. You can also earn points and badges that you can use to redeem more rewards and benefits.</p></li>
-<li><b>How can I chat with my friends, join groups, send stickers, voice messages, and more on Garena Next?</b></li>
-<li><p>You can chat with your friends, join groups, send stickers, voice messages, and more on Garena Next by tapping on the Chat icon on the bottom menu. You can also create your own profile, customize your avatar, and show off your achievements. You can also discover new games, watch live streams, and follow your favorite streamers.</p></li>
-<li><b>How can I update my games automatically on Garena Next?</b></li>
-<li><p>You can update your games automatically on Garena Next by tapping on the Games icon on the bottom menu. You can see all the games that you have installed on your device, or download new ones from the app. You can also see if there are any updates available for your games and tap on the Update button to install them.</p></li>
-</ol></p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/FRAG APK The Best FPS and TPS Game for Your Phone.md
DELETED
@@ -1,115 +0,0 @@
-<br />
-<h1>FRAG Download APK: A Guide to the Ultimate PvP Hero Game</h1>
-<p>If you are looking for a fun and friendly PvP hero game that you can play on your Android device, you should check out FRAG. FRAG is a free-to-play game developed by Oh BiBi, a studio that specializes in creating mobile games with stunning graphics and engaging gameplay. In this game, you can choose from over 100 characters, each with their own unique weapons and abilities, and compete against players from all over the world in explosive 1v1 or 2v2 battles. You can also customize your characters with skins and upgrades, join or create a club with your friends, participate in events and contests, and share your content with other players. In this article, we will show you how to download and install FRAG APK on your Android device, how to play FRAG and enjoy its features, how to customize your gameplay and improve your skills, and how to join the FRAG community and become a superstar.</p>
-<h2>frag download apk</h2><br /><p><b><b>Download</b> --->>> <a href="https://jinyurl.com/2uNObx">https://jinyurl.com/2uNObx</a></b></p><br /><br />
-<h2>How to Download and Install FRAG APK on Your Android Device</h2>
-<p>Downloading and installing FRAG APK on your Android device is very easy. Just follow these simple steps:</p>
-<ul>
-<li><p>Step 1: Go to the official website of FRAG or use a trusted APK source. You can find the official website of FRAG at [12](https://www.fragthegame.com/), where you can also learn more about the game and its features. Alternatively, you can use a trusted APK source like [11](https://apkcombo.com/frag/com.ohbibi.fps/) or [10](https://apkpure.com/frag-pro-shooter/com.ohbibi.fps) to download the APK file.</p></li>
-<li><p>Step 2: Download the APK file and allow installation from unknown sources. Once you have found the APK file, tap on it to start downloading it. You may need to allow installation from unknown sources on your device settings if you haven't done so before. To do this, go to Settings > Security > Unknown Sources and enable it.</p></li>
-<li><p>Step 3: Open the APK file and follow the instructions to install FRAG. After downloading the APK file, open it and follow the instructions on the screen to install FRAG on your device. It may take a few minutes for the installation to complete. Once the installation is done, you can launch FRAG and start playing.</p></li>
-</ul>
-<h2>How to Play FRAG and Enjoy Its Features</h2>
-<p>Playing FRAG is very simple and fun. You just need to follow these basic steps:</p>
-<ul>
-<li><p>Step 1: Choose your hero, create your team, and enter the arena. When you start the game, you will be able to choose one of the available heroes, each with their own role and power. You can also create your own team of five heroes, or join a random team with other players. Then, you can enter the arena and get ready for the battle.</p></li>
-<li><p>Step 2: Control your character in FPS or TPS view and switch between them. You can control your character in either first-person shooter (FPS) or third-person shooter (TPS) view, depending on your preference. You can also switch between them by tapping on the camera icon on the screen. You can move your character with the joystick on the left, and aim and shoot with the buttons on the right. You can also use your special abilities by tapping on the icons on the bottom.</p></li>
-<li><p>Step 3: Use your weapons and abilities to destroy the enemy bunker. The objective of the game is to destroy the enemy bunker before they destroy yours. You can do this by shooting at it with your weapons, or using your abilities to deal more damage. You can also destroy the enemy towers and drones to gain more points and resources. Be careful, though, as the enemy will try to stop you and do the same to your bunker.</p></li>
-</ul>
-<h2>How to Customize Your Gameplay and Improve Your Skills</h2>
-<p>If you want to make your gameplay more personalized and improve your skills, you can do the following things:</p>
-<p>frag pro shooter apk download<br />
-frag game apk free download<br />
-frag mod apk download<br />
-frag offline apk download<br />
-frag latest version apk download<br />
-frag hack apk download<br />
-frag android game apk download<br />
-frag pvp game apk download<br />
-frag 3d shooter apk download<br />
-frag obb file apk download<br />
-frag unlimited money apk download<br />
-frag fps game apk download<br />
-frag tps game apk download<br />
-frag online multiplayer apk download<br />
-frag 1v1 duels apk download<br />
-frag 2v2 team mode apk download<br />
-frag hero game apk download<br />
-frag oh bibi apk download<br />
-frag action game apk download<br />
-frag battle game apk download<br />
-frag arena game apk download<br />
-frag combat game apk download<br />
-frag explosive game apk download<br />
-frag fun game apk download<br />
-frag friendly game apk download<br />
-frag social game apk download<br />
-frag 100+ weapons apk download<br />
-frag 100+ heroes apk download<br />
-frag skins and power apk download<br />
-frag missions and rewards apk download<br />
-frag new hero and meta apk download<br />
-frag net energy gain apk download<br />
-frag mini sun experiment apk download<br />
-frag holy grail fusion apk download<br />
-frag 100 million degrees apk download<br />
-frag 30 seconds record apk download<br />
-frag south korea facility apk download<br />
-frag kstar fusion reactor apk download<br />
-frag nuclear fusion breakthrough apk download<br />
-frag unlimited energy potential apk download<br />
-how to download frag pro shooter apk <br />
-where to download frag game apk <br />
-what is the best site to download frag mod apk <br />
-when will the new version of frag be available for apk download <br />
-why should I play frag offline mode with the downloaded apk <br />
-how to install the obb file for the downloaded frag apk <br />
-how to get unlimited money in the downloaded frag hack apk <br />
-how to switch between fps and tps views in the downloaded frag android game apk <br />
-how to play online multiplayer with friends in the downloaded frag pvp game apk <br />
-how to customize the skins and power of my heroes in the downloaded frag hero game apk</p>
-<ul>
-<li><p>Step 1: Build your own FRAG team from over 100 heroes with different roles and powers. You can unlock new heroes by playing the game, completing missions, or using gold and diamonds. You can also upgrade your heroes by using cards and coins. You can build your own FRAG team by choosing five heroes that complement each other's roles and powers. For example, you can have a tank, a healer, a sniper, a damage dealer, and a support.</p></li>
-<li><p>Step 2: Personalize your characters with skins and upgrades. You can also customize your characters with skins and upgrades that change their appearance and performance. You can buy skins with gold or diamonds, or get them from chests or events. You can also upgrade your weapons and abilities with coins and cards.</p></li>
-<li><p>Step 3: Choose from four game modes and adapt your strategy to the map. You can choose from four game modes in FRAG: Classic, Payload, Street Frag, and Challenge. Each game mode has its own rules and objectives, so you need to adapt your strategy accordingly. You can also play on different maps that have different layouts and features, such as bridges, tunnels, ramps, etc.</p></li>
-</ul>
-<h2>How to Join the FRAG Community and Become a Superstar</h2>
-<p>If you want to join the FRAG community and become a superstar, you can do the following things:</p>
-<ul>
-<li><p>Step 1: Follow FRAG on social media and join the Discord server. You can follow FRAG on social media platforms like [9](https://www.facebook.com/FRAGTheGame/), [8](https://twitter.com/FRAGProShooter), [7](https://www.instagram.com/fragthegame/), [6](https://www.youtube.com/channel/UCQj5ZXFo0rZ4xAPJbUFio7g), [5](https://www.tiktok.com/@fragthegame), [4](https://www.reddit.com/r/FRAGProShooter/), etc., where you can get the latest news, updates, tips, tricks, contests, giveaways and more. You can also join the FRAG Discord server at [3](https://discord.gg/FRAGProShooter), where you can chat with other players, get support, share feedback, and have fun.</p></li>
-<li><p>Step 2: Participate in events, contests, and missions for rewards and fame. You can also participate in various events, contests, and missions that are regularly held in FRAG. These include seasonal events, weekly challenges, daily missions, tournaments, leaderboards, etc. By participating in these activities, you can earn rewards such as gold, diamonds, chests, cards, skins, and more. You can also gain fame and recognition by ranking up in the leaderboards, winning tournaments, or getting featured in the game or on social media.</p></li>
-<li><p>Step 3: Create and share your content with other players and fans. You can also create and share your own content with other players and fans of FRAG. You can use the in-game video editor to record and edit your best moments, or use external tools to make your own videos, streams, blogs, podcasts, etc. You can then share your content on social media platforms like YouTube, Twitch, Facebook, Instagram, TikTok, Reddit, etc., or on the FRAG Discord server. You can also watch and support other content creators who make FRAG content.</p></li>
-</ul>
-<h1>Conclusion</h1>
-<p>FRAG is a fun and friendly PvP hero game that you can download and play for free on your Android device. It has stunning graphics, engaging gameplay, and a large community of players and fans. You can choose from over 100 characters with different roles and powers, customize them with skins and upgrades, compete against players from all over the world in four game modes and different maps, join or create a club with your friends, participate in events and contests for rewards and fame, and create and share your own content with other players. If you are looking for a game that will keep you entertained and challenged for hours, you should download FRAG APK today and join the FRAG family.</p>
-<h2>FAQs</h2>
-<ul>
-<li><p>Q1: Is FRAG safe to download and play?</p>
-<p>A1: Yes, FRAG is safe to download and play. It is developed by a reputable studio that follows the best practices of security and privacy. It does not contain any viruses or malware that could harm your device or data. However, you should always download FRAG APK from the official website or a trusted source to avoid any potential risks.</p></li>
-<li><p>Q2: Can I play FRAG offline?</p>
-<p>A2: No, you cannot play FRAG offline. FRAG is an online game that requires an internet connection to play. You need to connect to the game servers to access the game features and modes, as well as to interact with other players. If you lose your internet connection while playing FRAG, you may experience lag or disconnection issues.</p></li>
-<li><p>Q3: What are the best characters to use in FRAG?</p>
-<p>A3: There is no definitive answer to this question, as different characters have different strengths and weaknesses, and different players have different preferences and playstyles. However, some general tips to choose the best characters are:</p>
-<ul>
-<li><p>Pick characters that suit your role and strategy. For example, if you want to be a tank that can absorb damage and protect your team, you may want to use characters like Big Paku or Lucha Muerta. If you want to be a sniper that can deal high damage from a distance and snipe the enemy bunker, you may want to use characters like Lolly Pop or Andrometa.</p></li>
-<li><p>Pick characters that complement each other's powers and abilities. For example, if you want to create a team that can heal and support each other, you may want to use characters like Cyber Cop or Dr. Frost. If you want to create a team that can deal massive damage and stun the enemy, you may want to use characters like R0N1N or Mekkalodon.</p></li>
-<li><p>Pick characters that match your skill level and playstyle. For example, if you are a beginner or casual player, you may want to use characters that are easy to control and have simple abilities, like Jet or Dan. If you are an advanced or competitive player, you may want to use characters that are more challenging and have complex abilities, like VR-0N1CA or Volcano.</p></li>
-</ul></li>
-<li><p>Q4: How can I get more gold and diamonds in FRAG?</p>
-<p>A4: Gold and diamonds are the main currencies in FRAG. You can use them to buy new characters, skins, upgrades, chests, etc. You can get more gold and diamonds by doing the following things:</p>
-<ul>
-<li><p>Play the game regularly and complete missions. You can earn gold and diamonds by playing the game and completing daily, weekly, and seasonal missions. You can also get bonus rewards by logging in every day and watching ads.</p></li>
-<li><p>Open chests and collect cards. You can open chests that contain gold, diamonds, cards, and other items. You can get chests by winning battles, ranking up in the leaderboards, participating in events and contests, or buying them with gold or diamonds. You can also collect cards that can be converted into gold or diamonds.</p></li>
-<li><p>Join or create a club and share gifts. You can join or create a club with your friends or other players and share gifts with them. You can send and receive gold and diamonds as gifts every day.</p></li>
-<li><p>Buy gold and diamonds with real money. You can also buy gold and diamonds with real money if you want to support the game and get more resources faster. You can do this by tapping on the shop icon on the main menu and choosing the amount of gold or diamonds you want to buy.</p></li>
-</ul></li>
-<li><p>Q5: How can I contact the developers of FRAG?</p>
-<p>A5: If you have any questions, feedback, suggestions, bug reports, or issues with FRAG, you can contact the developers of FRAG by doing the following things:</p>
-<ul>
-<li><p>Use the in-game support system. You can use the in-game support system to send a message to the developers of FRAG. You can do this by tapping on the settings icon on the main menu and choosing the support option. You can then write your message and attach screenshots if needed.</p></li>
-<li><p>Email the developers of FRAG. You can also email the developers of FRAG directly at [2](mailto:[email protected]). You can write your message in English or French and attach screenshots if needed.</p></li>
-<li><p>Post on the FRAG Discord server or social media platforms. You can also post your message on the FRAG Discord server or social media platforms like Facebook, Twitter, Instagram, etc., where the developers of FRAG may see it and respond to it. However, this is not a guaranteed way to contact them, so you may want to use the other methods first.</p></li>
-</ul></li>
-</ul></p> 401be4b1e0<br />
-<br />
-<br />
spaces/7hao/bingo/src/components/ui/icons.tsx
DELETED
@@ -1,504 +0,0 @@
-'use client'
-
-import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-
-function IconNextChat({
-  className,
-  inverted,
-  ...props
-}: React.ComponentProps<'svg'> & { inverted?: boolean }) {
-  const id = React.useId()
-
-  return (
-    <svg
-      viewBox="0 0 17 17"
-      fill="none"
-      xmlns="http://www.w3.org/2000/svg"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <defs>
-        <linearGradient
-          id={`gradient-${id}-1`}
-          x1="10.6889"
-          y1="10.3556"
-          x2="13.8445"
-          y2="14.2667"
-          gradientUnits="userSpaceOnUse"
-        >
-          <stop stopColor={inverted ? 'white' : 'black'} />
-          <stop
-            offset={1}
-            stopColor={inverted ? 'white' : 'black'}
-            stopOpacity={0}
-          />
-        </linearGradient>
-        <linearGradient
-          id={`gradient-${id}-2`}
-          x1="11.7555"
-          y1="4.8"
-          x2="11.7376"
-          y2="9.50002"
-          gradientUnits="userSpaceOnUse"
-        >
-          <stop stopColor={inverted ? 'white' : 'black'} />
-          <stop
-            offset={1}
-            stopColor={inverted ? 'white' : 'black'}
-            stopOpacity={0}
-          />
-        </linearGradient>
-      </defs>
-      <path
-        d="M1 16L2.58314 11.2506C1.83084 9.74642 1.63835 8.02363 2.04013 6.39052C2.4419 4.75741 3.41171 3.32057 4.776 2.33712C6.1403 1.35367 7.81003 0.887808 9.4864 1.02289C11.1628 1.15798 12.7364 1.8852 13.9256 3.07442C15.1148 4.26363 15.842 5.83723 15.9771 7.5136C16.1122 9.18997 15.6463 10.8597 14.6629 12.224C13.6794 13.5883 12.2426 14.5581 10.6095 14.9599C8.97637 15.3616 7.25358 15.1692 5.74942 14.4169L1 16Z"
-        fill={inverted ? 'black' : 'white'}
-        stroke={inverted ? 'black' : 'white'}
-        strokeWidth={2}
-        strokeLinecap="round"
-        strokeLinejoin="round"
-      />
-      <mask
-        id="mask0_91_2047"
-        style={{ maskType: 'alpha' }}
-        maskUnits="userSpaceOnUse"
-        x={1}
-        y={0}
-        width={16}
-        height={16}
-      >
-        <circle cx={9} cy={8} r={8} fill={inverted ? 'black' : 'white'} />
-      </mask>
-      <g mask="url(#mask0_91_2047)">
-        <circle cx={9} cy={8} r={8} fill={inverted ? 'black' : 'white'} />
-        <path
-          d="M14.2896 14.0018L7.146 4.8H5.80005V11.1973H6.87681V6.16743L13.4444 14.6529C13.7407 14.4545 14.0231 14.2369 14.2896 14.0018Z"
-          fill={`url(#gradient-${id}-1)`}
-        />
-        <rect
-          x="11.2222"
-          y="4.8"
-          width="1.06667"
-          height="6.4"
-          fill={`url(#gradient-${id}-2)`}
-        />
-      </g>
-    </svg>
-  )
-}
-
-function IconOpenAI({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      fill="currentColor"
-      viewBox="0 0 24 24"
-      role="img"
-      xmlns="http://www.w3.org/2000/svg"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <title>OpenAI icon</title>
-      <path d="M22.2819 9.8211a5.9847 5.9847 0 0 0-.5157-4.9108 6.0462 6.0462 0 0 0-6.5098-2.9A6.0651 6.0651 0 0 0 4.9807 4.1818a5.9847 5.9847 0 0 0-3.9977 2.9 6.0462 6.0462 0 0 0 .7427 7.0966 5.98 5.98 0 0 0 .511 4.9107 6.051 6.051 0 0 0 6.5146 2.9001A5.9847 5.9847 0 0 0 13.2599 24a6.0557 6.0557 0 0 0 5.7718-4.2058 5.9894 5.9894 0 0 0 3.9977-2.9001 6.0557 6.0557 0 0 0-.7475-7.0729zm-9.022 12.6081a4.4755 4.4755 0 0 1-2.8764-1.0408l.1419-.0804 4.7783-2.7582a.7948.7948 0 0 0 .3927-.6813v-6.7369l2.02 1.1686a.071.071 0 0 1 .038.052v5.5826a4.504 4.504 0 0 1-4.4945 4.4944zm-9.6607-4.1254a4.4708 4.4708 0 0 1-.5346-3.0137l.142.0852 4.783 2.7582a.7712.7712 0 0 0 .7806 0l5.8428-3.3685v2.3324a.0804.0804 0 0 1-.0332.0615L9.74 19.9502a4.4992 4.4992 0 0 1-6.1408-1.6464zM2.3408 7.8956a4.485 4.485 0 0 1 2.3655-1.9728V11.6a.7664.7664 0 0 0 .3879.6765l5.8144 3.3543-2.0201 1.1685a.0757.0757 0 0 1-.071 0l-4.8303-2.7865A4.504 4.504 0 0 1 2.3408 7.872zm16.5963 3.8558L13.1038 8.364 15.1192 7.2a.0757.0757 0 0 1 .071 0l4.8303 2.7913a4.4944 4.4944 0 0 1-.6765 8.1042v-5.6772a.79.79 0 0 0-.407-.667zm2.0107-3.0231l-.142-.0852-4.7735-2.7818a.7759.7759 0 0 0-.7854 0L9.409 9.2297V6.8974a.0662.0662 0 0 1 .0284-.0615l4.8303-2.7866a4.4992 4.4992 0 0 1 6.6802 4.66zM8.3065 12.863l-2.02-1.1638a.0804.0804 0 0 1-.038-.0567V6.0742a4.4992 4.4992 0 0 1 7.3757-3.4537l-.142.0805L8.704 5.459a.7948.7948 0 0 0-.3927.6813zm1.0976-2.3654l2.602-1.4998 2.6069 1.4998v2.9994l-2.5974 1.4997-2.6067-1.4997Z" />
-    </svg>
-  )
-}
-
-function IconGitHub({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      role="img"
-      viewBox="0 0 24 24"
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <title>GitHub</title>
-      <path d="M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12" />
-    </svg>
-  )
-}
-
-function IconSeparator({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      fill="none"
-      shapeRendering="geometricPrecision"
-      stroke="currentColor"
-      strokeLinecap="round"
-      strokeLinejoin="round"
-      strokeWidth="1"
-      viewBox="0 0 24 24"
-      aria-hidden="true"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M16.88 3.549L7.12 20.451"></path>
-    </svg>
-  )
-}
-
-function IconArrowDown({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="m205.66 149.66-72 72a8 8 0 0 1-11.32 0l-72-72a8 8 0 0 1 11.32-11.32L120 196.69V40a8 8 0 0 1 16 0v156.69l58.34-58.35a8 8 0 0 1 11.32 11.32Z" />
-    </svg>
-  )
-}
-
-function IconArrowRight({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="m221.66 133.66-72 72a8 8 0 0 1-11.32-11.32L196.69 136H40a8 8 0 0 1 0-16h156.69l-58.35-58.34a8 8 0 0 1 11.32-11.32l72 72a8 8 0 0 1 0 11.32Z" />
-    </svg>
-  )
-}
-
-function IconUser({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M230.92 212c-15.23-26.33-38.7-45.21-66.09-54.16a72 72 0 1 0-73.66 0c-27.39 8.94-50.86 27.82-66.09 54.16a8 8 0 1 0 13.85 8c18.84-32.56 52.14-52 89.07-52s70.23 19.44 89.07 52a8 8 0 1 0 13.85-8ZM72 96a56 56 0 1 1 56 56 56.06 56.06 0 0 1-56-56Z" />
-    </svg>
-  )
-}
-
-function IconPlus({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M224 128a8 8 0 0 1-8 8h-80v80a8 8 0 0 1-16 0v-80H40a8 8 0 0 1 0-16h80V40a8 8 0 0 1 16 0v80h80a8 8 0 0 1 8 8Z" />
-    </svg>
-  )
-}
-
-function IconArrowElbow({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M200 32v144a8 8 0 0 1-8 8H67.31l34.35 34.34a8 8 0 0 1-11.32 11.32l-48-48a8 8 0 0 1 0-11.32l48-48a8 8 0 0 1 11.32 11.32L67.31 168H184V32a8 8 0 0 1 16 0Z" />
-    </svg>
-  )
-}
-
-function IconSpinner({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4 animate-spin', className)}
-      {...props}
-    >
-      <path d="M232 128a104 104 0 0 1-208 0c0-41 23.81-78.36 60.66-95.27a8 8 0 0 1 6.68 14.54C60.15 61.59 40 93.27 40 128a88 88 0 0 0 176 0c0-34.73-20.15-66.41-51.34-80.73a8 8 0 0 1 6.68-14.54C208.19 49.64 232 87 232 128Z" />
-    </svg>
-  )
-}
-
-function IconMessage({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M216 48H40a16 16 0 0 0-16 16v160a15.84 15.84 0 0 0 9.25 14.5A16.05 16.05 0 0 0 40 240a15.89 15.89 0 0 0 10.25-3.78.69.69 0 0 0 .13-.11L82.5 208H216a16 16 0 0 0 16-16V64a16 16 0 0 0-16-16ZM40 224Zm176-32H82.5a16 16 0 0 0-10.3 3.75l-.12.11L40 224V64h176Z" />
-    </svg>
-  )
-}
-
-function IconTrash({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M216 48h-40v-8a24 24 0 0 0-24-24h-48a24 24 0 0 0-24 24v8H40a8 8 0 0 0 0 16h8v144a16 16 0 0 0 16 16h128a16 16 0 0 0 16-16V64h8a8 8 0 0 0 0-16ZM96 40a8 8 0 0 1 8-8h48a8 8 0 0 1 8 8v8H96Zm96 168H64V64h128Zm-80-104v64a8 8 0 0 1-16 0v-64a8 8 0 0 1 16 0Zm48 0v64a8 8 0 0 1-16 0v-64a8 8 0 0 1 16 0Z" />
-    </svg>
-  )
-}
-
-function IconMore({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      viewBox="0 0 24 24"
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M7.75 12C7.75 12.9665 6.9665 13.75 6 13.75C5.0335 13.75 4.25 12.9665 4.25 12C4.25 11.0335 5.0335 10.25 6 10.25C6.9665 10.25 7.75 11.0335 7.75 12ZM13.75 12C13.75 12.9665 12.9665 13.75 12 13.75C11.0335 13.75 10.25 12.9665 10.25 12C10.25 11.0335 11.0335 10.25 12 10.25C12.9665 10.25 13.75 11.0335 13.75 12ZM18 13.75C18.9665 13.75 19.75 12.9665 19.75 12C19.75 11.0335 18.9665 10.25 18 10.25C17.0335 10.25 16.25 11.0335 16.25 12C16.25 12.9665 17.0335 13.75 18 13.75Z"></path>
-    </svg>
-  )
-}
-
-function IconRefresh({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M197.67 186.37a8 8 0 0 1 0 11.29C196.58 198.73 170.82 224 128 224c-37.39 0-64.53-22.4-80-39.85V208a8 8 0 0 1-16 0v-48a8 8 0 0 1 8-8h48a8 8 0 0 1 0 16H55.44C67.76 183.35 93 208 128 208c36 0 58.14-21.46 58.36-21.68a8 8 0 0 1 11.31.05ZM216 40a8 8 0 0 0-8 8v23.85C192.53 54.4 165.39 32 128 32c-42.82 0-68.58 25.27-69.66 26.34a8 8 0 0 0 11.3 11.34C69.86 69.46 92 48 128 48c35 0 60.24 24.65 72.56 40H168a8 8 0 0 0 0 16h48a8 8 0 0 0 8-8V48a8 8 0 0 0-8-8Z" />
-    </svg>
-  )
-}
-
-function IconStop({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M128 24a104 104 0 1 0 104 104A104.11 104.11 0 0 0 128 24Zm0 192a88 88 0 1 1 88-88 88.1 88.1 0 0 1-88 88Zm24-120h-48a8 8 0 0 0-8 8v48a8 8 0 0 0 8 8h48a8 8 0 0 0 8-8v-48a8 8 0 0 0-8-8Zm-8 48h-32v-32h32Z" />
-    </svg>
-  )
-}
-
-function IconSidebar({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M216 40H40a16 16 0 0 0-16 16v144a16 16 0 0 0 16 16h176a16 16 0 0 0 16-16V56a16 16 0 0 0-16-16ZM40 56h40v144H40Zm176 144H96V56h120v144Z" />
-    </svg>
-  )
-}
-
-function IconMoon({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M233.54 142.23a8 8 0 0 0-8-2 88.08 88.08 0 0 1-109.8-109.8 8 8 0 0 0-10-10 104.84 104.84 0 0 0-52.91 37A104 104 0 0 0 136 224a103.09 103.09 0 0 0 62.52-20.88 104.84 104.84 0 0 0 37-52.91 8 8 0 0 0-1.98-7.98Zm-44.64 48.11A88 88 0 0 1 65.66 67.11a89 89 0 0 1 31.4-26A106 106 0 0 0 96 56a104.11 104.11 0 0 0 104 104 106 106 0 0 0 14.92-1.06 89 89 0 0 1-26.02 31.4Z" />
-    </svg>
-  )
-}
-
-function IconSun({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M120 40V16a8 8 0 0 1 16 0v24a8 8 0 0 1-16 0Zm72 88a64 64 0 1 1-64-64 64.07 64.07 0 0 1 64 64Zm-16 0a48 48 0 1 0-48 48 48.05 48.05 0 0 0 48-48ZM58.34 69.66a8 8 0 0 0 11.32-11.32l-16-16a8 8 0 0 0-11.32 11.32Zm0 116.68-16 16a8 8 0 0 0 11.32 11.32l16-16a8 8 0 0 0-11.32-11.32ZM192 72a8 8 0 0 0 5.66-2.34l16-16a8 8 0 0 0-11.32-11.32l-16 16A8 8 0 0 0 192 72Zm5.66 114.34a8 8 0 0 0-11.32 11.32l16 16a8 8 0 0 0 11.32-11.32ZM48 128a8 8 0 0 0-8-8H16a8 8 0 0 0 0 16h24a8 8 0 0 0 8-8Zm80 80a8 8 0 0 0-8 8v24a8 8 0 0 0 16 0v-24a8 8 0 0 0-8-8Zm112-88h-24a8 8 0 0 0 0 16h24a8 8 0 0 0 0-16Z" />
-    </svg>
-  )
-}
-
-function IconCopy({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M216 32H88a8 8 0 0 0-8 8v40H40a8 8 0 0 0-8 8v128a8 8 0 0 0 8 8h128a8 8 0 0 0 8-8v-40h40a8 8 0 0 0 8-8V40a8 8 0 0 0-8-8Zm-56 176H48V96h112Zm48-48h-32V88a8 8 0 0 0-8-8H96V48h112Z" />
-    </svg>
-  )
-}
-
-function IconCheck({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="m229.66 77.66-128 128a8 8 0 0 1-11.32 0l-56-56a8 8 0 0 1 11.32-11.32L96 188.69 218.34 66.34a8 8 0 0 1 11.32 11.32Z" />
-    </svg>
-  )
-}
-
-function IconDownload({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M224 152v56a16 16 0 0 1-16 16H48a16 16 0 0 1-16-16v-56a8 8 0 0 1 16 0v56h160v-56a8 8 0 0 1 16 0Zm-101.66 5.66a8 8 0 0 0 11.32 0l40-40a8 8 0 0 0-11.32-11.32L136 132.69V40a8 8 0 0 0-16 0v92.69l-26.34-26.35a8 8 0 0 0-11.32 11.32Z" />
-    </svg>
-  )
-}
-
-function IconClose({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M205.66 194.34a8 8 0 0 1-11.32 11.32L128 139.31l-66.34 66.35a8 8 0 0 1-11.32-11.32L116.69 128 50.34 61.66a8 8 0 0 1 11.32-11.32L128 116.69l66.34-66.35a8 8 0 0 1 11.32 11.32L139.31 128Z" />
-    </svg>
-  )
-}
-
-function IconEdit({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      fill="none"
-      viewBox="0 0 24 24"
-      strokeWidth={1.5}
-      stroke="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path
-        strokeLinecap="round"
-        strokeLinejoin="round"
-        d="M16.862 4.487l1.687-1.688a1.875 1.875 0 112.652 2.652L10.582 16.07a4.5 4.5 0 01-1.897 1.13L6 18l.8-2.685a4.5 4.5 0 011.13-1.897l8.932-8.931zm0 0L19.5 7.125M18 14v4.75A2.25 2.25 0 0115.75 21H5.25A2.25 2.25 0 013 18.75V8.25A2.25 2.25 0 015.25 6H10"
-      />
-    </svg>
-  )
-}
-
-function IconShare({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      viewBox="0 0 256 256"
-      {...props}
-    >
-      <path d="m237.66 106.35-80-80A8 8 0 0 0 144 32v40.35c-25.94 2.22-54.59 14.92-78.16 34.91-28.38 24.08-46.05 55.11-49.76 87.37a12 12 0 0 0 20.68 9.58c11-11.71 50.14-48.74 107.24-52V192a8 8 0 0 0 13.66 5.65l80-80a8 8 0 0 0 0-11.3ZM160 172.69V144a8 8 0 0 0-8-8c-28.08 0-55.43 7.33-81.29 21.8a196.17 196.17 0 0 0-36.57 26.52c5.8-23.84 20.42-46.51 42.05-64.86C99.41 99.77 127.75 88 152 88a8 8 0 0 0 8-8V51.32L220.69 112Z" />
-    </svg>
-  )
-}
-
-function IconUsers({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      viewBox="0 0 256 256"
-      {...props}
-    >
-      <path d="M117.25 157.92a60 60 0 1 0-66.5 0 95.83 95.83 0 0 0-47.22 37.71 8 8 0 1 0 13.4 8.74 80 80 0 0 1 134.14 0 8 8 0 0 0 13.4-8.74 95.83 95.83 0 0 0-47.22-37.71ZM40 108a44 44 0 1 1 44 44 44.05 44.05 0 0 1-44-44Zm210.14 98.7a8 8 0 0 1-11.07-2.33A79.83 79.83 0 0 0 172 168a8 8 0 0 1 0-16 44 44 0 1 0-16.34-84.87 8 8 0 1 1-5.94-14.85 60 60 0 0 1 55.53 105.64 95.83 95.83 0 0 1 47.22 37.71 8 8 0 0 1-2.33 11.07Z" />
-    </svg>
-  )
-}
-
-function IconExternalLink({
-  className,
-  ...props
-}: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      viewBox="0 0 256 256"
-      {...props}
-    >
-      <path d="M224 104a8 8 0 0 1-16 0V59.32l-66.33 66.34a8 8 0 0 1-11.32-11.32L196.68 48H152a8 8 0 0 1 0-16h64a8 8 0 0 1 8 8Zm-40 24a8 8 0 0 0-8 8v72H48V80h72a8 8 0 0 0 0-16H48a16 16 0 0 0-16 16v128a16 16 0 0 0 16 16h128a16 16 0 0 0 16-16v-72a8 8 0 0 0-8-8Z" />
-    </svg>
-  )
-}
-
-function IconChevronUpDown({
-  className,
-  ...props
-}: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      viewBox="0 0 256 256"
-      {...props}
-    >
-      <path d="M181.66 170.34a8 8 0 0 1 0 11.32l-48 48a8 8 0 0 1-11.32 0l-48-48a8 8 0 0 1 11.32-11.32L128 212.69l42.34-42.35a8 8 0 0 1 11.32 0Zm-96-84.68L128 43.31l42.34 42.35a8 8 0 0 0 11.32-11.32l-48-48a8 8 0 0 0-11.32 0l-48 48a8 8 0 0 0 11.32 11.32Z" />
-    </svg>
-  )
-}
-
-export {
-  IconEdit,
-  IconNextChat,
-  IconOpenAI,
-  IconGitHub,
-  IconSeparator,
-  IconArrowDown,
-  IconArrowRight,
-  IconUser,
-  IconPlus,
-  IconArrowElbow,
-  IconSpinner,
-  IconMessage,
-  IconTrash,
-  IconMore,
-  IconRefresh,
-  IconStop,
-  IconSidebar,
-  IconMoon,
-  IconSun,
-  IconCopy,
-  IconCheck,
-  IconDownload,
-  IconClose,
-  IconShare,
-  IconUsers,
-  IconExternalLink,
-  IconChevronUpDown
-}
spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Code Reviews 2b60c26d2a2e4a348f8f14c77023c385.md
DELETED
@@ -1,44 +0,0 @@
-# Code Reviews
-
-Last edited time: March 31, 2023 1:49 PM
-Owner: Anonymous
-Tags: Codebase
-
-<aside>
-💡 This template documents how to review code. Helpful for new and remote employees to get and stay aligned.
-
-</aside>
-
-# Philosophy
-
-Why do you perform code reviews? What are your guiding principles for these reviews?
-
-You may want to mention other pages here. Like Engineering Guidelines. To link to another page inline, type `@` followed by the name of the page: [Engineering Guidelines](Engineering%20Guidelines%204208cbd4733d4f6f94982f3fb24f6379.md)
-
-# Preparing Code for Review
-
-Preparation sets your reviewers up for success.
-
-### Commit Messages
-
-Make sure your commit messages are descriptive.
-
-### Github PR Descriptions
-
-Your PR descriptions should be an extension of your commit messages. Write about both what the commit changes, and how you implemented the change.
-
-# Performing Code Reviews
-
-### How to Review
-
-- Make two passes over the PR if it's substantial.
-- On the first pass, come to an understanding of the code change at a high level.
-- On the second pass, pay more attention to semantic details.
-
-# Examples
-
-```jsx
-var commentCount = 0;
-```
-
-You might suggest that this be a `let` instead of `var`.
spaces/AIConsultant/MusicGen/Dockerfile
DELETED
@@ -1,26 +0,0 @@
-FROM nvidia/cuda:11.8.0-base-ubuntu22.04
-
-ENV DEBIAN_FRONTEND=noninteractive \
-    PYTHONUNBUFFERED=1 \
-    PYTHONIOENCODING=UTF-8
-RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt apt update &&\
-    apt install -y \
-    wget \
-    git \
-    pkg-config \
-    python3 \
-    python3-pip \
-    python-is-python3 \
-    ffmpeg \
-    libnvrtc11.2 \
-    libtcmalloc-minimal4
-
-RUN useradd -m -u 1000 ac
-RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip wheel
-ENV TORCH_COMMAND="pip install torch==2.0.1+cu118 torchaudio --extra-index-url https://download.pytorch.org/whl/cu118"
-RUN --mount=type=cache,target=/root/.cache python -m $TORCH_COMMAND
-RUN ln -s /usr/lib/x86_64-linux-gnu/libnvrtc.so.11.2 /usr/lib/x86_64-linux-gnu/libnvrtc.so
-USER 1000
-RUN mkdir ~/.cache
-RUN --mount=type=cache,target=/home/ac/.cache --mount=source=.,target=/home/ac/audiocraft python -m pip install -r /home/ac/audiocraft/requirements.txt
-WORKDIR /home/ac/audiocraft
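A minimal sketch of driving a container built from this Dockerfile with the Python docker SDK (docker-py); the image tag here is hypothetical, not part of the Space. Note that the `RUN --mount=type=cache` instructions require BuildKit, so the image is easiest to build with the docker CLI (`DOCKER_BUILDKIT=1 docker build ...`) rather than the legacy docker-py builder.

```python
# Sketch, assuming an image built from the Dockerfile above already exists
# locally under the hypothetical tag "audiocraft-space:dev".
import docker

client = docker.from_env()
container = client.containers.run(
    "audiocraft-space:dev",
    command='python -c "import torch; print(torch.__version__)"',
    detach=True,
)
container.wait()                  # block until the command finishes
print(container.logs().decode())  # expect the pinned 2.0.1+cu118 build
```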
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/version.py
DELETED
@@ -1 +0,0 @@
-__version__ = '0.2.1'
spaces/AIGText/GlyphControl/ldm/modules/diffusionmodules/util.py
DELETED
@@ -1,279 +0,0 @@
-# adopted from
-# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
-# and
-# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-# and
-# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
-#
-# thanks!
-
-
-import os
-import math
-import torch
-import torch.nn as nn
-import numpy as np
-from einops import repeat
-
-from ldm.util import instantiate_from_config
-
-
-def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-    if schedule == "linear":
-        betas = (
-            torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
-        )
-
-    elif schedule == "cosine":
-        timesteps = (
-            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
-        )
-        alphas = timesteps / (1 + cosine_s) * np.pi / 2
-        alphas = torch.cos(alphas).pow(2)
-        alphas = alphas / alphas[0]
-        betas = 1 - alphas[1:] / alphas[:-1]
-        betas = np.clip(betas, a_min=0, a_max=0.999)
-
-    elif schedule == "sqrt_linear":
-        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
-    elif schedule == "sqrt":
-        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
-    else:
-        raise ValueError(f"schedule '{schedule}' unknown.")
-    return betas.numpy()
-
-
-def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
-    if ddim_discr_method == 'uniform':
-        c = num_ddpm_timesteps // num_ddim_timesteps
-        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
-    elif ddim_discr_method == 'quad':
-        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
-    else:
-        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
-
-    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
-    # add one to get the final alpha values right (the ones from first scale to data during sampling)
-    steps_out = ddim_timesteps + 1
-    if verbose:
-        print(f'Selected timesteps for ddim sampler: {steps_out}')
-    return steps_out
-
-
-def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
-    # select alphas for computing the variance schedule
-    alphas = alphacums[ddim_timesteps]
-    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
-
-    # according to the formula provided in https://arxiv.org/abs/2010.02502
-    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
-    if verbose:
-        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
-        print(f'For the chosen value of eta, which is {eta}, '
-              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
-    return sigmas, alphas, alphas_prev
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
-    """
-    Create a beta schedule that discretizes the given alpha_t_bar function,
-    which defines the cumulative product of (1-beta) over time from t = [0,1].
-    :param num_diffusion_timesteps: the number of betas to produce.
-    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
-                      produces the cumulative product of (1-beta) up to that
-                      part of the diffusion process.
-    :param max_beta: the maximum beta to use; use values lower than 1 to
-                     prevent singularities.
-    """
-    betas = []
-    for i in range(num_diffusion_timesteps):
-        t1 = i / num_diffusion_timesteps
-        t2 = (i + 1) / num_diffusion_timesteps
-        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
-    return np.array(betas)
-
-
-def extract_into_tensor(a, t, x_shape):
-    b, *_ = t.shape
-    out = a.gather(-1, t)
-    return out.reshape(b, *((1,) * (len(x_shape) - 1)))
-
-
-def checkpoint(func, inputs, params, flag):
-    """
-    Evaluate a function without caching intermediate activations, allowing for
-    reduced memory at the expense of extra compute in the backward pass.
-    :param func: the function to evaluate.
-    :param inputs: the argument sequence to pass to `func`.
-    :param params: a sequence of parameters `func` depends on but does not
-                   explicitly take as arguments.
-    :param flag: if False, disable gradient checkpointing.
-    """
-    if flag:
-        args = tuple(inputs) + tuple(params)
-        return CheckpointFunction.apply(func, len(inputs), *args)
-    else:
-        return func(*inputs)
-
-
-class CheckpointFunction(torch.autograd.Function):
-    @staticmethod
-    def forward(ctx, run_function, length, *args):
-        ctx.run_function = run_function
-        ctx.input_tensors = list(args[:length])
-        ctx.input_params = list(args[length:])
-        ctx.gpu_autocast_kwargs = {"enabled": torch.is_autocast_enabled(),
-                                   "dtype": torch.get_autocast_gpu_dtype(),
-                                   "cache_enabled": torch.is_autocast_cache_enabled()}
-        with torch.no_grad():
-            output_tensors = ctx.run_function(*ctx.input_tensors)
-        return output_tensors
-
-    @staticmethod
-    def backward(ctx, *output_grads):
-        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
-        with torch.enable_grad(), \
-                torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
-            # Fixes a bug where the first op in run_function modifies the
-            # Tensor storage in place, which is not allowed for detach()'d
-            # Tensors.
-            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
-            output_tensors = ctx.run_function(*shallow_copies)
-        input_grads = torch.autograd.grad(
-            output_tensors,
-            ctx.input_tensors + ctx.input_params,
-            output_grads,
-            allow_unused=True,
-        )
-        del ctx.input_tensors
-        del ctx.input_params
-        del output_tensors
-        return (None, None) + input_grads
-
-
-def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
-    """
-    Create sinusoidal timestep embeddings.
-    :param timesteps: a 1-D Tensor of N indices, one per batch element.
-                      These may be fractional.
-    :param dim: the dimension of the output.
-    :param max_period: controls the minimum frequency of the embeddings.
-    :return: an [N x dim] Tensor of positional embeddings.
-    """
-    if not repeat_only:
-        half = dim // 2
-        freqs = torch.exp(
-            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
-        ).to(device=timesteps.device)
-        args = timesteps[:, None].float() * freqs[None]
-        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
-        if dim % 2:
-            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
-    else:
-        embedding = repeat(timesteps, 'b -> b d', d=dim)
-    return embedding
-
-
-def zero_module(module):
-    """
-    Zero out the parameters of a module and return it.
-    """
-    for p in module.parameters():
-        p.detach().zero_()
-    return module
-
-def identity_init_fc(module):
-    """
-    Initialize a fully connected module's weight as the identity matrix and its bias as 0.
-    """
-    nn.init.eye_(module.weight)
-    nn.init.constant_(module.bias, 0)
-    # for p in module.parameters():
-    #     nn.init.ones_(p)
-    return module
-
-def scale_module(module, scale):
-    """
-    Scale the parameters of a module and return it.
-    """
-    for p in module.parameters():
-        p.detach().mul_(scale)
-    return module
-
-
-def mean_flat(tensor):
-    """
-    Take the mean over all non-batch dimensions.
-    """
-    return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def normalization(channels):
-    """
-    Make a standard normalization layer.
-    :param channels: number of input channels.
-    :return: an nn.Module for normalization.
-    """
-    return GroupNorm32(32, channels)
-
-
-# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
-class SiLU(nn.Module):
-    def forward(self, x):
-        return x * torch.sigmoid(x)
-
-
-class GroupNorm32(nn.GroupNorm):
-    def forward(self, x):
-        return super().forward(x.float()).type(x.dtype)
-
-def conv_nd(dims, *args, **kwargs):
-    """
-    Create a 1D, 2D, or 3D convolution module.
-    """
-    if dims == 1:
-        return nn.Conv1d(*args, **kwargs)
-    elif dims == 2:
-        return nn.Conv2d(*args, **kwargs)
-    elif dims == 3:
-        return nn.Conv3d(*args, **kwargs)
-    raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def linear(*args, **kwargs):
-    """
-    Create a linear module.
-    """
-    return nn.Linear(*args, **kwargs)
-
-
-def avg_pool_nd(dims, *args, **kwargs):
-    """
-    Create a 1D, 2D, or 3D average pooling module.
-    """
-    if dims == 1:
-        return nn.AvgPool1d(*args, **kwargs)
-    elif dims == 2:
-        return nn.AvgPool2d(*args, **kwargs)
-    elif dims == 3:
-        return nn.AvgPool3d(*args, **kwargs)
-    raise ValueError(f"unsupported dimensions: {dims}")
-
-
-class HybridConditioner(nn.Module):
-
-    def __init__(self, c_concat_config, c_crossattn_config):
-        super().__init__()
-        self.concat_conditioner = instantiate_from_config(c_concat_config)
-        self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
-
-    def forward(self, c_concat, c_crossattn):
-        c_concat = self.concat_conditioner(c_concat)
-        c_crossattn = self.crossattn_conditioner(c_crossattn)
-        return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
-
-
-def noise_like(shape, device, repeat=False):
-    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
-    noise = lambda: torch.randn(shape, device=device)
-    return repeat_noise() if repeat else noise()
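A short usage sketch for the schedule and embedding helpers in the file above. The import path matches the file's location in the repository; the timestep count and embedding width are arbitrary illustrative choices.

```python
# Usage sketch for make_beta_schedule and timestep_embedding, assuming the
# module is importable as ldm.modules.diffusionmodules.util.
import numpy as np
import torch
from ldm.modules.diffusionmodules.util import make_beta_schedule, timestep_embedding

betas = make_beta_schedule("linear", n_timestep=1000)  # (1000,) float64 numpy array
alphas_cumprod = np.cumprod(1.0 - betas)               # DDPM cumulative alpha-bar

t = torch.randint(0, 1000, (4,))      # one timestep index per batch element
emb = timestep_embedding(t, dim=128)  # (4, 128) sinusoidal embedding
print(betas[:3], alphas_cumprod[-1], emb.shape)
```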
spaces/AIWaves/Debate/src/agents/LLM/__init__.py
DELETED
File without changes
spaces/AIWaves/SOP_Generation-single/design_states.py
DELETED
@@ -1,52 +0,0 @@
-import sys
-sys.path.append("../")
-import re
-from LLM.base_LLM import *
-from utils import extract
-from single_prompts import *
-
-
-llm = OpenAILLM()
-# design state
-
-def get_cot_result(target):
-    chat_history = [{"role":"user","content":f"<target>{target}</target>"}]
-    response = llm.get_response(chat_history,design_states_cot_system_prompt)
-    print(response)
-    return response
-
-def get_desgin_states(target,index):
-    chat_history = [{"role":"user","content":f"<target>{target}</target>"}]
-    design_state_system_prompt = get_design_state_system_prompt(index)
-    response = llm.get_response(chat_history,system_prompt=design_state_system_prompt)
-    print(response)
-    # extract the fields with regular expressions
-    role = extract(response,"role")
-    pattern = r'<state>(.*?)<\/state>'
-    states = re.findall(pattern, response, re.DOTALL)
-    style = extract(response,"style")
-    # build a list of dictionaries, one per extracted state
-    result_list = []
-    for state in states:
-        state_name = extract(state,"state_name")
-        rule = extract(state,"rule")
-        task = extract(state,"task")
-        judge = extract(state,"judge")
-
-        # create the dictionary and append it to the result list
-        state_dict = {
-            "style":style,
-            "role":role,
-            "state_name": state_name,
-            "task": task,
-            "rule": rule,
-            "judge" : judge
-        }
-        result_list.append(state_dict)
-
-    # print the results
-    print("design states")
-    for item in result_list:
-        print(item)
-    return result_list
-
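The `extract` helper imported from `utils` is not shown in this commit. Given how it is called above, a plausible minimal implementation might look like the following; this is a guess at the interface, not the repository's actual code.

```python
# Hypothetical sketch of the <tag>...</tag> extraction helper used above;
# the real utils.extract in the repository may behave differently.
import re

def extract(text: str, tag: str) -> str:
    """Return the body of the first <tag>...</tag> block, or "" if absent."""
    match = re.search(rf"<{tag}>(.*?)</{tag}>", text, re.DOTALL)
    return match.group(1).strip() if match else ""

sample = "<role>teacher</role><state><state_name>greeting</state_name></state>"
print(extract(sample, "role"))        # -> "teacher"
print(extract(sample, "state_name"))  # -> "greeting"
```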
spaces/ASJMO/freegpt/client/css/style.css
DELETED
@@ -1,18 +0,0 @@
-@import "./global.css";
-@import "./hljs.css";
-@import "./main.css";
-@import "./sidebar.css";
-@import "./conversation.css";
-@import "./message.css";
-@import "./stop-generating.css";
-@import "./typing.css";
-@import "./checkbox.css";
-@import "./label.css";
-@import "./button.css";
-@import "./buttons.css";
-@import "./dropdown.css";
-@import "./field.css";
-@import "./select.css";
-@import "./options.css";
-@import "./settings.css";
-@import "./message-input.css";
spaces/AdamGoyer/is_it_fly/README.md
DELETED
@@ -1,10 +0,0 @@
----
-license: apache-2.0
-title: Is It Fly
-sdk: gradio
-emoji: 🌖
-colorFrom: indigo
-colorTo: pink
-app_file: app.py
-pinned: true
----
spaces/AdithyaSNair/alzheimers_prediction_using_cnn/app.py
DELETED
@@ -1,47 +0,0 @@
-import numpy as np
-import os
-import keras
-import pandas as pd
-import seaborn as sns
-import matplotlib.pyplot as plt
-from keras.models import Sequential
-from PIL import Image
-from keras.layers import Conv2D, Flatten, Dense, Dropout, BatchNormalization, MaxPooling2D
-from sklearn.preprocessing import OneHotEncoder
-import pickle
-import tensorflow as tf
-import gradio as gr
-
-model_path = "model.h5"
-model = tf.keras.models.load_model(model_path)
-
-# Define the labels
-labels = ['Non Demented', 'Mild Dementia', 'Moderate Dementia', 'Very Mild Dementia']
-
-# Define the prediction function
-def predict_dementia(image):
-    img = Image.fromarray(image.astype('uint8'))
-    img = img.resize((128, 128))
-    img = np.array(img)
-    img = img.reshape(1, 128, 128, 3)
-
-    prediction = model.predict(img)
-    prediction_class = np.argmax(prediction)
-    return labels[prediction_class]
-
-# Create the Gradio interface
-iface = gr.Interface(
-    fn=predict_dementia,
-    inputs="image",
-    outputs="text",
-    title="Deep Learning-Based Classification of Dementia Stages Using Brain Images",
-    description="Dementia is a neurodegenerative disorder characterized by a decline in cognitive abilities. Early detection and classification of dementia stages are crucial for effective treatment and care. In this study, we propose a deep learning-based approach for classifying dementia stages using brain images. The objective is to develop a model that can accurately differentiate between different stages of dementia, including non-demented, mild dementia, moderate dementia, and very mild dementia.",
-    article=''' To achieve this, we utilize a dataset consisting of brain images from individuals with varying dementia stages. The dataset is preprocessed to ensure uniformity and eliminate noise. A convolutional neural network (CNN) architecture is designed and trained on the preprocessed images. The model incorporates multiple convolutional layers, batch normalization, max pooling, and dropout layers to capture relevant features from the images. The training procedure involves optimizing the model using the Adamax optimizer and minimizing the categorical cross-entropy loss.
-The performance of the proposed model is evaluated using various metrics, including accuracy, validation accuracy, loss and validation loss. Additionally, a comparison is made with existing approaches for dementia classification to assess the effectiveness of the proposed method. The results demonstrate promising classification accuracy and highlight the potential of deep learning techniques in accurately diagnosing and classifying dementia stages based on brain images.
-The findings of this study contribute to the field of dementia research by providing a reliable and automated method for dementia classification. The developed model can assist medical professionals in early diagnosis and treatment planning, potentially improving patient outcomes and quality of life. Further research and refinement of the model could lead to more accurate and efficient diagnosis of dementia, enabling timely intervention and support for affected individuals
-    ''',
-    examples=[["Non(1).jpg"],["Mild.jpg"],["Moderate.jpg"],["Very(1).jpg"]],
-    allow_flagging=False
-)
-
-iface.launch(debug=True)
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorcomponents/ColorComponents.d.ts
DELETED
@@ -1,60 +0,0 @@
-import Sizer from '../../sizer/Sizer';
-import RoundRectangle from '../../roundrectangle/RoundRectangle';
-import Label from '../../label/Label';
-import CanvasInput from '../../canvasinput/CanvasInput';
-
-export default ColorComponents;
-
-declare namespace ColorComponents {
-
-    interface IFormatLabelConfig {
-        space?: {
-            left?: number, right?: number, top?: number, bottom?: number,
-        },
-
-        background?: RoundRectangle.IConfig,
-
-        text?: Phaser.GameObjects.TextStyle,
-        expandTextWidth?: boolean,
-        expandTextHeight?: boolean,
-
-        align?: Label.AlignTypes,
-    }
-
-    interface IConfig extends Sizer.IConfig {
-        background?: Phaser.GameObjects.GameObject,
-
-        formatLabel?: Phaser.GameObjects.GameObject | IFormatLabelConfig;
-
-        inputText0?: Phaser.GameObjects.GameObject,
-        inputText1?: Phaser.GameObjects.GameObject,
-        inputText2?: Phaser.GameObjects.GameObject,
-        inputText?: CanvasInput.IConfig,
-
-        proportion?: {
-            formatLabel?: number,
-
-        },
-
-        valuechangeCallback: (newValue: number, oldValue: number, colorComponents: ColorComponents) => void,
-
-        value?: number
-    }
-}
-
-declare class ColorComponents extends Sizer {
-    constructor(
-        scene: Phaser.Scene,
-        config?: ColorComponents.IConfig
-    );
-
-    setValue(value: number): this;
-    value: number;
-
-    setColor(color: number): this;
-    color: number;
-
-    setColorFormat(colorFormat: 'RGB' | 'HSV'): this;
-    toggleColorFormat(): this;
-    colorFormat: 'RGB' | 'HSV';
-}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetTotalRowProportions.js
DELETED
@@ -1,13 +0,0 @@
-var GetTotalRowProportions = function () {
-    var result = 0,
-        proportion;
-    for (var i = 0; i < this.rowCount; i++) {
-        proportion = this.rowProportions[i];
-        if (proportion > 0) {
-            result += proportion;
-        }
-    }
-    return result;
-}
-
-export default GetTotalRowProportions;
spaces/Alcedo/yunmedia/resources/chatgpt-plugin/js/app.bf8a14e9.js
DELETED
The diff for this file is too large to render.
See raw diff
spaces/Alfasign/nomic-ai-gpt4all-13b-snoozy/app.py
DELETED
@@ -1,34 +0,0 @@
-
-import gradio as gr
-import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM
-
-def generate_text(prompt, style):
-    model_name = "nomic-ai/gpt4all-13b-snoozy"
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-
-    full_prompt = f"{prompt} Schreibe die Antwort im Stil von {style}."
-    inputs = tokenizer.encode(full_prompt, return_tensors='pt')
-    outputs = model.generate(inputs, max_length=150, num_return_sequences=1, no_repeat_ngram_size=2)
-
-    generated = outputs[:,inputs.shape[-1]:]
-    result = tokenizer.decode(generated[0], skip_special_tokens=True)
-
-    return result
-
-styles = ["eine formelle E-Mail", "eine Kurzgeschichte", "ein Gedicht", "ein wissenschaftlicher Bericht", "eine Zeitungsartikel"]
-
-css = """
-body {
-    background-color: #f0f0f0;
-    color: #333;
-}
-.gradio-input, .gradio-output {
-    background-color: #fff;
-    color: #333;
-}
-"""
-
-iface = gr.Interface(fn=generate_text, inputs=["textbox", gr.inputs.Dropdown(choices=styles)], outputs="text", css=css)
-iface.launch()
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/monotonic_align/core.c
DELETED
The diff for this file is too large to render.
See raw diff
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/custom_pipeline_overview.md
DELETED
@@ -1,56 +0,0 @@
-<!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
--->
-
-# Loading custom pipelines
-
-[[open-in-colab]]
-
-A community pipeline is any [`DiffusionPipeline`] class implemented in a form that differs from the original implementation described in its paper (for example, [`StableDiffusionControlNetPipeline`] corresponds to ["Text-to-Image Generation with ControlNet Conditioning"](https://arxiv.org/abs/2302.05543)). These pipelines provide additional functionality or extend the original implementation of a pipeline.
-
-There are many great community pipelines, such as [Speech to Image](https://github.com/huggingface/diffusers/tree/main/examples/community#speech-to-image) or [Composable Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#composable-stable-diffusion), and you can find all of the official community pipelines [here](https://github.com/huggingface/diffusers/tree/main/examples/community).
-
-To load a community pipeline from the Hub, pass the repository ID of the community pipeline along with the repository ID of the model from which you want to load the pipeline weights and components. For example, the example below loads a dummy pipeline from `hf-internal-testing/diffusers-dummy-pipeline` and the pipeline weights and components from `google/ddpm-cifar10-32`.
-
-<Tip warning={true}>
-
-🔒 Loading a community pipeline from the Hugging Face Hub means you are trusting that its code is safe. Be sure to inspect the code online before loading and running it automatically!
-
-</Tip>
-
-```py
-from diffusers import DiffusionPipeline
-
-pipeline = DiffusionPipeline.from_pretrained(
-    "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
-)
-```
-
-Loading an official community pipeline works similarly, but in addition to loading weights from an official repository ID, you can also specify the components of the pipeline directly. In the example below, when loading the community [CLIP Guided Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#clip-guided-stable-diffusion) pipeline, the `clip_model` and `feature_extractor` components used by that pipeline are set explicitly.
-
-```py
-from diffusers import DiffusionPipeline
-from transformers import CLIPImageProcessor, CLIPModel
-
-clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
-
-feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
-clip_model = CLIPModel.from_pretrained(clip_model_id)
-
-pipeline = DiffusionPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5",
-    custom_pipeline="clip_guided_stable_diffusion",
-    clip_model=clip_model,
-    feature_extractor=feature_extractor,
-)
-```
-
-For more on community pipelines, see the [Community pipelines](https://github.com/huggingface/diffusers/blob/main/docs/source/en/using-diffusers/custom_pipeline_examples) guide. If you are interested in contributing a community pipeline, check out the [How to contribute a community pipeline](https://github.com/huggingface/diffusers/blob/main/docs/source/en/using-diffusers/contribute_pipeline) guide!
spaces/Andy1621/uniformer_image_detection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py
DELETED
@@ -1,39 +0,0 @@
-_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w32',
-    backbone=dict(
-        _delete_=True,
-        type='HRNet',
-        extra=dict(
-            stage1=dict(
-                num_modules=1,
-                num_branches=1,
-                block='BOTTLENECK',
-                num_blocks=(4, ),
-                num_channels=(64, )),
-            stage2=dict(
-                num_modules=1,
-                num_branches=2,
-                block='BASIC',
-                num_blocks=(4, 4),
-                num_channels=(32, 64)),
-            stage3=dict(
-                num_modules=4,
-                num_branches=3,
-                block='BASIC',
-                num_blocks=(4, 4, 4),
-                num_channels=(32, 64, 128)),
-            stage4=dict(
-                num_modules=3,
-                num_branches=4,
-                block='BASIC',
-                num_blocks=(4, 4, 4, 4),
-                num_channels=(32, 64, 128, 256)))),
-    neck=dict(
-        _delete_=True,
-        type='HRFPN',
-        in_channels=[32, 64, 128, 256],
-        out_channels=256))
-# learning policy
-lr_config = dict(step=[16, 19])
-runner = dict(type='EpochBasedRunner', max_epochs=20)
spaces/Andy1621/uniformer_image_detection/mmdet/core/post_processing/bbox_nms.py
DELETED
@@ -1,168 +0,0 @@
-import torch
-from mmcv.ops.nms import batched_nms
-
-from mmdet.core.bbox.iou_calculators import bbox_overlaps
-
-
-def multiclass_nms(multi_bboxes,
-                   multi_scores,
-                   score_thr,
-                   nms_cfg,
-                   max_num=-1,
-                   score_factors=None,
-                   return_inds=False):
-    """NMS for multi-class bboxes.
-
-    Args:
-        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
-        multi_scores (Tensor): shape (n, #class), where the last column
-            contains scores of the background class, but this will be ignored.
-        score_thr (float): bbox threshold, bboxes with scores lower than it
-            will not be considered.
-        nms_thr (float): NMS IoU threshold
-        max_num (int, optional): if there are more than max_num bboxes after
-            NMS, only top max_num will be kept. Default to -1.
-        score_factors (Tensor, optional): The factors multiplied to scores
-            before applying NMS. Default to None.
-        return_inds (bool, optional): Whether return the indices of kept
-            bboxes. Default to False.
-
-    Returns:
-        tuple: (bboxes, labels, indices (optional)), tensors of shape (k, 5),
-            (k), and (k). Labels are 0-based.
-    """
-    num_classes = multi_scores.size(1) - 1
-    # exclude background category
-    if multi_bboxes.shape[1] > 4:
-        bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
-    else:
-        bboxes = multi_bboxes[:, None].expand(
-            multi_scores.size(0), num_classes, 4)
-
-    scores = multi_scores[:, :-1]
-
-    labels = torch.arange(num_classes, dtype=torch.long)
-    labels = labels.view(1, -1).expand_as(scores)
-
-    bboxes = bboxes.reshape(-1, 4)
-    scores = scores.reshape(-1)
-    labels = labels.reshape(-1)
-
-    if not torch.onnx.is_in_onnx_export():
-        # NonZero not supported in TensorRT
-        # remove low scoring boxes
-        valid_mask = scores > score_thr
-    # multiply score_factor after threshold to preserve more bboxes, improve
-    # mAP by 1% for YOLOv3
-    if score_factors is not None:
-        # expand the shape to match original shape of score
-        score_factors = score_factors.view(-1, 1).expand(
-            multi_scores.size(0), num_classes)
-        score_factors = score_factors.reshape(-1)
-        scores = scores * score_factors
-
-    if not torch.onnx.is_in_onnx_export():
-        # NonZero not supported in TensorRT
-        inds = valid_mask.nonzero(as_tuple=False).squeeze(1)
-        bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds]
-    else:
-        # TensorRT NMS plugin has invalid output filled with -1
-        # add dummy data to make detection output correct.
-        bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0)
-        scores = torch.cat([scores, scores.new_zeros(1)], dim=0)
-        labels = torch.cat([labels, labels.new_zeros(1)], dim=0)
-
-    if bboxes.numel() == 0:
-        if torch.onnx.is_in_onnx_export():
-            raise RuntimeError('[ONNX Error] Can not record NMS '
-                               'as it has not been executed this time')
-        if return_inds:
-            return bboxes, labels, inds
-        else:
-            return bboxes, labels
-
-    dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)
-
-    if max_num > 0:
-        dets = dets[:max_num]
-        keep = keep[:max_num]
-
-    if return_inds:
-        return dets, labels[keep], keep
-    else:
-        return dets, labels[keep]
-
-
-def fast_nms(multi_bboxes,
-             multi_scores,
-             multi_coeffs,
-             score_thr,
-             iou_thr,
-             top_k,
-             max_num=-1):
-    """Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_.
-
-    Fast NMS allows already-removed detections to suppress other detections so
-    that every instance can be decided to be kept or discarded in parallel,
-    which is not possible in traditional NMS. This relaxation allows us to
-    implement Fast NMS entirely in standard GPU-accelerated matrix operations.
-
-    Args:
-        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
-        multi_scores (Tensor): shape (n, #class+1), where the last column
-            contains scores of the background class, but this will be ignored.
-        multi_coeffs (Tensor): shape (n, #class*coeffs_dim).
-        score_thr (float): bbox threshold, bboxes with scores lower than it
-            will not be considered.
-        iou_thr (float): IoU threshold to be considered as conflicted.
-        top_k (int): if there are more than top_k bboxes before NMS,
-            only top top_k will be kept.
-        max_num (int): if there are more than max_num bboxes after NMS,
-            only top max_num will be kept. If -1, keep all the bboxes.
-            Default: -1.
-
-    Returns:
-        tuple: (bboxes, labels, coefficients), tensors of shape (k, 5), (k, 1),
-            and (k, coeffs_dim). Labels are 0-based.
-    """
-
-    scores = multi_scores[:, :-1].t()  # [#class, n]
-    scores, idx = scores.sort(1, descending=True)
-
-    idx = idx[:, :top_k].contiguous()
-    scores = scores[:, :top_k]  # [#class, topk]
-    num_classes, num_dets = idx.size()
-    boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)
-    coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)
-
-    iou = bbox_overlaps(boxes, boxes)  # [#class, topk, topk]
-    iou.triu_(diagonal=1)
-    iou_max, _ = iou.max(dim=1)
-
-    # Now just filter out the ones higher than the threshold
-    keep = iou_max <= iou_thr
-
-    # Second thresholding introduces 0.2 mAP gain at negligible time cost
-    keep *= scores > score_thr
-
-    # Assign each kept detection to its corresponding class
-    classes = torch.arange(
-        num_classes, device=boxes.device)[:, None].expand_as(keep)
-    classes = classes[keep]
-
-    boxes = boxes[keep]
-    coeffs = coeffs[keep]
-    scores = scores[keep]
-
-    # Only keep the top max_num highest scores across all classes
-    scores, idx = scores.sort(0, descending=True)
-    if max_num > 0:
-        idx = idx[:max_num]
-        scores = scores[:max_num]
-
-    classes = classes[idx]
-    boxes = boxes[idx]
-    coeffs = coeffs[idx]
-
-    cls_dets = torch.cat([boxes, scores[:, None]], dim=1)
-    return cls_dets, classes, coeffs
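For reference, a hedged usage sketch of the deleted `multiclass_nms` (not part of the commit; it assumes the module is still importable and that mmcv, which provides `batched_nms`, is installed):

```python
import torch
from mmdet.core.post_processing.bbox_nms import multiclass_nms  # path as it existed before deletion

# Three candidate boxes shared across classes: shape (n, 4).
boxes = torch.tensor([[10., 10., 50., 50.],
                      [12., 12., 52., 52.],
                      [100., 100., 150., 150.]])
# Two foreground classes plus the trailing background column (ignored).
scores = torch.tensor([[0.9, 0.1, 0.0],
                       [0.8, 0.2, 0.0],
                       [0.1, 0.7, 0.2]])
dets, labels = multiclass_nms(boxes, scores, score_thr=0.3,
                              nms_cfg=dict(type='nms', iou_threshold=0.5))
print(dets.shape)  # (k, 5): x1, y1, x2, y2, score
print(labels)      # 0-based class indices for the kept boxes
```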
spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/utils.py
DELETED
@@ -1,100 +0,0 @@
-import functools
-
-import mmcv
-import torch.nn.functional as F
-
-
-def reduce_loss(loss, reduction):
-    """Reduce loss as specified.
-
-    Args:
-        loss (Tensor): Elementwise loss tensor.
-        reduction (str): Options are "none", "mean" and "sum".
-
-    Return:
-        Tensor: Reduced loss tensor.
-    """
-    reduction_enum = F._Reduction.get_enum(reduction)
-    # none: 0, elementwise_mean:1, sum: 2
-    if reduction_enum == 0:
-        return loss
-    elif reduction_enum == 1:
-        return loss.mean()
-    elif reduction_enum == 2:
-        return loss.sum()
-
-
-@mmcv.jit(derivate=True, coderize=True)
-def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
-    """Apply element-wise weight and reduce loss.
-
-    Args:
-        loss (Tensor): Element-wise loss.
-        weight (Tensor): Element-wise weights.
-        reduction (str): Same as built-in losses of PyTorch.
-        avg_factor (float): Average factor when computing the mean of losses.
-
-    Returns:
-        Tensor: Processed loss values.
-    """
-    # if weight is specified, apply element-wise weight
-    if weight is not None:
-        loss = loss * weight
-
-    # if avg_factor is not specified, just reduce the loss
-    if avg_factor is None:
-        loss = reduce_loss(loss, reduction)
-    else:
-        # if reduction is mean, then average the loss by avg_factor
-        if reduction == 'mean':
-            loss = loss.sum() / avg_factor
-        # if reduction is 'none', then do nothing, otherwise raise an error
-        elif reduction != 'none':
-            raise ValueError('avg_factor can not be used with reduction="sum"')
-    return loss
-
-
-def weighted_loss(loss_func):
-    """Create a weighted version of a given loss function.
-
-    To use this decorator, the loss function must have the signature like
-    `loss_func(pred, target, **kwargs)`. The function only needs to compute
-    element-wise loss without any reduction. This decorator will add weight
-    and reduction arguments to the function. The decorated function will have
-    the signature like `loss_func(pred, target, weight=None, reduction='mean',
-    avg_factor=None, **kwargs)`.
-
-    :Example:
-
-    >>> import torch
-    >>> @weighted_loss
-    >>> def l1_loss(pred, target):
-    >>>     return (pred - target).abs()
-
-    >>> pred = torch.Tensor([0, 2, 3])
-    >>> target = torch.Tensor([1, 1, 1])
-    >>> weight = torch.Tensor([1, 0, 1])
-
-    >>> l1_loss(pred, target)
-    tensor(1.3333)
-    >>> l1_loss(pred, target, weight)
-    tensor(1.)
-    >>> l1_loss(pred, target, reduction='none')
-    tensor([1., 1., 2.])
-    >>> l1_loss(pred, target, weight, avg_factor=2)
-    tensor(1.5000)
-    """
-
-    @functools.wraps(loss_func)
-    def wrapper(pred,
-                target,
-                weight=None,
-                reduction='mean',
-                avg_factor=None,
-                **kwargs):
-        # get element-wise loss
-        loss = loss_func(pred, target, **kwargs)
-        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
-        return loss
-
-    return wrapper
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
-_base_ = [
-    '../_base_/models/deeplabv3plus_r50-d8.py',
-    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
-    '../_base_/schedules/schedule_80k.py'
-]
-model = dict(
-    decode_head=dict(align_corners=True),
-    auxiliary_head=dict(align_corners=True),
-    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
spaces/Annotation-AI/fast-segment-everything-with-drawing-prompt/app.py
DELETED
@@ -1,17 +0,0 @@
-import os
-
-
-github_user = os.environ.get("GITHUB_USER")
-github_token = os.environ.get("GITHUB_TOKEN")
-
-repo_name = "annotation-ai/mlwiz-technical-demo"
-
-os.system(f"export GITHUB_USER={github_user}")
-os.system(f"export GITHUB_TOKEN={github_token}")
-os.system(f"git clone https://{github_user}:{github_token}@github.com/{repo_name}")
-
-cwd0 = os.getcwd()
-cwd1 = os.path.join(cwd0, "mlwiz-technical-demo/sam")
-os.chdir(cwd1)
-os.system("pip install -r requirements.txt")
-os.system("python app_everything_brush.py")
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/stare.py
DELETED
@@ -1,27 +0,0 @@
-import os.path as osp
-
-from .builder import DATASETS
-from .custom import CustomDataset
-
-
-@DATASETS.register_module()
-class STAREDataset(CustomDataset):
-    """STARE dataset.
-
-    In segmentation map annotation for STARE, 0 stands for background, which is
-    included in 2 categories. ``reduce_zero_label`` is fixed to False. The
-    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
-    '.ah.png'.
-    """
-
-    CLASSES = ('background', 'vessel')
-
-    PALETTE = [[120, 120, 120], [6, 230, 230]]
-
-    def __init__(self, **kwargs):
-        super(STAREDataset, self).__init__(
-            img_suffix='.png',
-            seg_map_suffix='.ah.png',
-            reduce_zero_label=False,
-            **kwargs)
-        assert osp.exists(self.img_dir)
spaces/Anonymous-sub/Rerender/ControlNet/docs/train.md
DELETED
@@ -1,276 +0,0 @@
-# Train a ControlNet to Control SD
-
-You are here because you want to control SD in your own way, maybe you have an idea for your perfect research project, and you will annotate some data or have already annotated your own dataset automatically or manually. Herein, the control can be anything that can be converted to images, such as edges, keypoints, segments, etc.
-
-Before moving on to your own dataset, we highly recommend first trying the toy dataset, Fill50K, as a sanity check. This will help you get a "feeling" for the training. You will know how long it will take for the model to converge and whether your device will be able to complete the training in an acceptable amount of time. And what it "feels" like when the model converges.
-
-We hope that after you read this page, you will find that training a ControlNet is as easy as (or easier than) training a pix2pix.
-
-## Step 0 - Design your control
-
-Let us take a look at a very simple task: controlling SD to fill circles with color.
-
-![img](../github_page/t1.png)
-
-This is simple: we want to control SD to fill a circle with colors, and the prompt contains some description of our target.
-
-Stable diffusion is trained on billions of images, and it already knows what "cyan", "circle", "pink", and "background" are.
-
-But it does not know the meaning of that "Control Image (Source Image)". Our target is to let it know.
-
-## Step 1 - Get a dataset
-
-Just download the Fill50K dataset from [our huggingface page](https://huggingface.co/lllyasviel/ControlNet) (training/fill50k.zip, the file is only 200M!). Make sure that the data is decompressed as
-
-    ControlNet/training/fill50k/prompt.json
-    ControlNet/training/fill50k/source/X.png
-    ControlNet/training/fill50k/target/X.png
-
-In the folder "fill50k/source", you will have 50k images of circle lines.
-
-![img](../github_page/t2.png)
-
-In the folder "fill50k/target", you will have 50k images of filled circles.
-
-![img](../github_page/t3.png)
-
-In the "fill50k/prompt.json", you will have their filenames and prompts. Each prompt is like "a balabala color circle in some other color background."
-
-![img](../github_page/t4.png)
-
-## Step 2 - Load the dataset
-
-Then you need to write a simple script to read this dataset for pytorch. (In fact we have written it for you in "tutorial_dataset.py".)
-
-```python
-import json
-import cv2
-import numpy as np
-
-from torch.utils.data import Dataset
-
-
-class MyDataset(Dataset):
-    def __init__(self):
-        self.data = []
-        with open('./training/fill50k/prompt.json', 'rt') as f:
-            for line in f:
-                self.data.append(json.loads(line))
-
-    def __len__(self):
-        return len(self.data)
-
-    def __getitem__(self, idx):
-        item = self.data[idx]
-
-        source_filename = item['source']
-        target_filename = item['target']
-        prompt = item['prompt']
-
-        source = cv2.imread('./training/fill50k/' + source_filename)
-        target = cv2.imread('./training/fill50k/' + target_filename)
-
-        # Do not forget that OpenCV reads images in BGR order.
-        source = cv2.cvtColor(source, cv2.COLOR_BGR2RGB)
-        target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB)
-
-        # Normalize source images to [0, 1].
-        source = source.astype(np.float32) / 255.0
-
-        # Normalize target images to [-1, 1].
-        target = (target.astype(np.float32) / 127.5) - 1.0
-
-        return dict(jpg=target, txt=prompt, hint=source)
-
-```
-
-This will make your dataset into an array-like object in python. You can test this dataset simply by accessing the array, like this
-
-```python
-from tutorial_dataset import MyDataset
-
-dataset = MyDataset()
-print(len(dataset))
-
-item = dataset[1234]
-jpg = item['jpg']
-txt = item['txt']
-hint = item['hint']
-print(txt)
-print(jpg.shape)
-print(hint.shape)
-
-```
-
-The outputs of this simple test on my machine are
-
-    50000
-    burly wood circle with orange background
-    (512, 512, 3)
-    (512, 512, 3)
-
-And this code is in "tutorial_dataset_test.py".
-
-In this way, the dataset is an array-like object with 50000 items. Each item is a dict with three entries: "jpg", "txt", and "hint". The "jpg" is the target image, the "hint" is the control image, and the "txt" is the prompt.
-
-Do not ask us why we use these three names - this is related to the dark history of a library called LDM.
-
-## Step 3 - What SD model do you want to control?
-
-Then you need to decide which Stable Diffusion Model you want to control. In this example, we will just use standard SD1.5. You can download it from the [official page of Stability](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main). You want the file ["v1-5-pruned.ckpt"](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main).
-
-(Or ["v2-1_512-ema-pruned.ckpt"](https://huggingface.co/stabilityai/stable-diffusion-2-1-base/tree/main) if you are using SD2.)
-
-Then you need to attach a control net to the SD model. The architecture is
-
-![img](../github_page/sd.png)
-
-Note that all weights inside the ControlNet are also copied from SD so that no layer is trained from scratch, and you are still finetuning the entire model.
-
-We provide a simple script for you to achieve this easily. If your SD filename is "./models/v1-5-pruned.ckpt" and you want the script to save the processed model (SD+ControlNet) at location "./models/control_sd15_ini.ckpt", you can just run:
-
-    python tool_add_control.py ./models/v1-5-pruned.ckpt ./models/control_sd15_ini.ckpt
-
-Or if you are using SD2:
-
-    python tool_add_control_sd21.py ./models/v2-1_512-ema-pruned.ckpt ./models/control_sd21_ini.ckpt
-
-You may also use other filenames as long as the command is "python tool_add_control.py input_path output_path".
-
-This is the correct output from my machine:
-
-![img](../github_page/t5.png)
-
-## Step 4 - Train!
-
-Happy! We finally come to the most exciting part: training!
-
-The training code in "tutorial_train.py" is actually surprisingly simple:
-
-```python
-import pytorch_lightning as pl
-from torch.utils.data import DataLoader
-from tutorial_dataset import MyDataset
-from cldm.logger import ImageLogger
-from cldm.model import create_model, load_state_dict
-
-
-# Configs
-resume_path = './models/control_sd15_ini.ckpt'
-batch_size = 4
-logger_freq = 300
-learning_rate = 1e-5
-sd_locked = True
-only_mid_control = False
-
-
-# First use cpu to load models. Pytorch Lightning will automatically move it to GPUs.
-model = create_model('./models/cldm_v15.yaml').cpu()
-model.load_state_dict(load_state_dict(resume_path, location='cpu'))
-model.learning_rate = learning_rate
-model.sd_locked = sd_locked
-model.only_mid_control = only_mid_control
-
-
-# Misc
-dataset = MyDataset()
-dataloader = DataLoader(dataset, num_workers=0, batch_size=batch_size, shuffle=True)
-logger = ImageLogger(batch_frequency=logger_freq)
-trainer = pl.Trainer(gpus=1, precision=32, callbacks=[logger])
-
-
-# Train!
-trainer.fit(model, dataloader)
-
-```
-(or "tutorial_train_sd21.py" if you are using SD2)
-
-Thanks to our organized dataset pytorch object and the power of pytorch_lightning, the entire code is just super short.
-
-Now, you may take a look at [Pytorch Lightning Official DOC](https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.trainer.trainer.Trainer.html#trainer) to find out how to enable many useful features like gradient accumulation, multiple GPU training, accelerated dataset loading, flexible checkpoint saving, etc. All these only need about one line of code. Great!
-
-Note that if you hit OOM, perhaps you need to enable [Low VRAM mode](low_vram.md), and perhaps you also need to use a smaller batch size and gradient accumulation. Or you may also want to use some "advanced" tricks like sliced attention or xformers. For example:
-
-```python
-# Configs
-batch_size = 1
-
-# Misc
-trainer = pl.Trainer(gpus=1, precision=32, callbacks=[logger], accumulate_grad_batches=4)  # But this will be 4x slower
-```
-
-Note that training with an 8 GB laptop GPU is challenging. We will need some GPU memory optimization at least as good as automatic1111's UI. This may require expert modifications to the code.
-
-### Screenshots
-
-The training is fast. After 4000 steps (batch size 4, learning rate 1e-5, about 50 minutes on PCIE 40G), the results on my machine (in an output folder "image_log") are
-
-Control:
-
-![img](../github_page/t/ip.png)
-
-Prompt:
-
-![img](../github_page/t/t.png)
-
-Prediction:
-
-![img](../github_page/t/op.png)
-
-Ground Truth:
-
-![img](../github_page/t/gt.png)
-
-Note that the SD's capability is preserved. Even training on this super-aligned dataset, it still draws some random textures and those snow decorations. (Besides, note that the ground truth looks a bit modified because it is converted from SD's latent image.)
-
-A larger batch size and longer training will further improve this. Adequate training will make the filling perfect.
-
-Of course, training SD to fill circles is meaningless, but this is a successful beginning of your story.
-
-Let us work together to control large models more and more.
-
-## Other options
-
-Beyond standard things, we also provide two important parameters "sd_locked" and "only_mid_control" that you need to know.
-
-### only_mid_control
-
-By default, only_mid_control is False. When it is True, you will train the below architecture.
-
-![img](../github_page/t6.png)
-
-This can be helpful when your computation power is limited and you want to speed up the training, or when you want to facilitate the "global" context learning. Note that sometimes you may pause training, set it to True, resume training, then pause again, set it back, and resume again.
-
-If your computation device is good, perhaps you do not need this. But I also know some artists are willing to train a model on their laptop for a month - in that case, perhaps this option can be useful.
-
-### sd_locked
-
-By default, sd_locked is True. When it is False, you will train the below architecture.
-
-![img](../github_page/t7.png)
-
-This will unlock some layers in SD and you will train them as a whole.
-
-This option is DANGEROUS! If your dataset is not good enough, this may downgrade the capability of your SD model.
-
-However, this option is also very useful when you are training on images with some specific style, or when you are training with special datasets (like a medical dataset with X-ray images or a geographic dataset with lots of Google Maps). You can understand this as simultaneously training the ControlNet and something like a DreamBooth.
-
-Also, if your dataset is large, you may want to end the training with a few thousand steps with those layers unlocked. This usually improves the "problem-specific" solutions a little. You may try it yourself to feel the difference.
-
-Also, if you unlock some original layers, you may want a lower learning rate, like 2e-6.
-
-## More Consideration: Sudden Converge Phenomenon and Gradient Accumulation
-
-![img](../github_page/ex1.jpg)
-
-Because we use zero convolutions, the SD should always be able to predict meaningful images. (If it cannot, the training has already failed.)
-
-You will always find that at some iterations, the model "suddenly" becomes able to fit some training conditions. This means that you will get a basically usable model at about 3k to 7k steps (future training will improve it, but that model after the first "sudden converge" should be basically functional).
-
-Note that 3k to 7k steps is not very large, and you should consider a larger batch size rather than more training steps. If you can observe the "sudden converge" at 3k steps using batch size 4, then, rather than train it with 300k further steps, a better idea is to use 100× gradient accumulation to re-train those 3k steps with 100× batch size. Note that perhaps we should not do this *too* extremely (perhaps 100x accumulation is too extreme), but you should consider that, since "sudden converge" will *always* happen at that certain point, getting a better converge is more important.
-
-Because that "sudden converge" always happens, let's say the "sudden converge" will happen at 3k steps and our budget can pay for 90k steps; then we have two options: (1) train 3k steps, sudden converge, then train 87k steps. (2) 30x gradient accumulation, train 3k steps (90k real computation steps), then sudden converge.
-
-In my experiments, (2) is usually better than (1). However, in real cases, perhaps you may need to balance the steps before and after the "sudden converge" on your own to find a good trade-off. The training after "sudden converge" is also important.
-
-But usually, if your logical batch size is already bigger than 256, then further extending the batch size is not very meaningful. In that case, perhaps a better idea is to train more steps. I tried some "common" logical batch sizes of 64, 96, and 128 (by gradient accumulation), and it seems that many complicated conditions can already be solved very well.
spaces/Ariharasudhan/YoloV5/models/experimental.py
DELETED
@@ -1,111 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Experimental modules
-"""
-import math
-
-import numpy as np
-import torch
-import torch.nn as nn
-
-from utils.downloads import attempt_download
-
-
-class Sum(nn.Module):
-    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
-    def __init__(self, n, weight=False):  # n: number of inputs
-        super().__init__()
-        self.weight = weight  # apply weights boolean
-        self.iter = range(n - 1)  # iter object
-        if weight:
-            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights
-
-    def forward(self, x):
-        y = x[0]  # no weight
-        if self.weight:
-            w = torch.sigmoid(self.w) * 2
-            for i in self.iter:
-                y = y + x[i + 1] * w[i]
-        else:
-            for i in self.iter:
-                y = y + x[i + 1]
-        return y
-
-
-class MixConv2d(nn.Module):
-    # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
-    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
-        super().__init__()
-        n = len(k)  # number of convolutions
-        if equal_ch:  # equal c_ per group
-            i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
-            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
-        else:  # equal weight.numel() per group
-            b = [c2] + [0] * n
-            a = np.eye(n + 1, n, k=-1)
-            a -= np.roll(a, 1, axis=1)
-            a *= np.array(k) ** 2
-            a[0] = 1
-            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
-
-        self.m = nn.ModuleList([
-            nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
-        self.bn = nn.BatchNorm2d(c2)
-        self.act = nn.SiLU()
-
-    def forward(self, x):
-        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
-
-
-class Ensemble(nn.ModuleList):
-    # Ensemble of models
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, x, augment=False, profile=False, visualize=False):
-        y = [module(x, augment, profile, visualize)[0] for module in self]
-        # y = torch.stack(y).max(0)[0]  # max ensemble
-        # y = torch.stack(y).mean(0)  # mean ensemble
-        y = torch.cat(y, 1)  # nms ensemble
-        return y, None  # inference, train output
-
-
-def attempt_load(weights, device=None, inplace=True, fuse=True):
-    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
-    from models.yolo import Detect, Model
-
-    model = Ensemble()
-    for w in weights if isinstance(weights, list) else [weights]:
-        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
-        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model
-
-        # Model compatibility updates
-        if not hasattr(ckpt, 'stride'):
-            ckpt.stride = torch.tensor([32.])
-        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
-            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict
-
-        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode
-
-    # Module compatibility updates
-    for m in model.modules():
-        t = type(m)
-        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
-            m.inplace = inplace  # torch 1.7.0 compatibility
-        if t is Detect and not isinstance(m.anchor_grid, list):
-            delattr(m, 'anchor_grid')
-            setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
-        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
-            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
-
-    # Return model
-    if len(model) == 1:
-        return model[-1]
-
-    # Return detection ensemble
-    print(f'Ensemble created with {weights}\n')
-    for k in 'names', 'nc', 'yaml':
-        setattr(model, k, getattr(model[0], k))
-    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
-    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
-    return model
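The `equal_ch` branch of the deleted `MixConv2d` spreads the `c2` output channels evenly across the `n` kernel sizes. A standalone sketch of just that bucketing step (illustration only, not part of the commit):

```python
import torch

# The equal_ch strategy assigns each of the c2 output channels to one of
# n kernel sizes, as evenly as the integer split allows.
n, c2 = 3, 64                                    # 3 kernel sizes, 64 output channels
i = torch.linspace(0, n - 1e-6, c2).floor()      # kernel index per output channel
c_ = [(i == g).sum().item() for g in range(n)]   # channels per kernel size
print(c_, sum(c_))  # [22, 21, 21] 64
```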
spaces/Asahi402/Real-CUGAN/app.py
DELETED
@@ -1,62 +0,0 @@
-from upcunet_v3 import RealWaifuUpScaler
-import gradio as gr
-import time
-import logging
-import os
-from PIL import ImageOps
-import numpy as np
-import math
-
-
-def greet(input_img, input_model_name, input_tile_mode):
-    # if input_img.size[0] * input_img.size[1] > 256 * 256:
-    #     y = int(math.sqrt(256*256/input_img.size[0]*input_img.size[1]))
-    #     x = int(input_img.size[0]/input_img.size[1]*y)
-    #     input_img = ImageOps.fit(input_img, (x, y))
-    input_img = np.array(input_img)
-    if input_model_name not in model_cache:
-        t1 = time.time()
-        upscaler = RealWaifuUpScaler(input_model_name[2], ModelPath + input_model_name, half=False, device="cpu")
-        t2 = time.time()
-        logger.info(f'load model time, {t2 - t1}')
-        model_cache[input_model_name] = upscaler
-    else:
-        upscaler = model_cache[input_model_name]
-        logger.info(f'load model from cache')
-
-    start = time.time()
-    result = upscaler(input_img, tile_mode=input_tile_mode)
-    end = time.time()
-    logger.info(f'input_model_name, {input_model_name}')
-    logger.info(f'input_tile_mode, {input_tile_mode}')
-    logger.info(f'input shape, {input_img.shape}')
-    logger.info(f'output shape, {result.shape}')
-    logger.info(f'speed time, {end - start}')
-    return result
-
-
-if __name__ == '__main__':
-    logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s")
-    logger = logging.getLogger()
-
-    ModelPath = "weights_v3/"
-    model_cache = {}
-
-    input_model_name = gr.inputs.Dropdown(os.listdir(ModelPath), default="up2x-latest-denoise2x.pth", label='选择model')
-    input_tile_mode = gr.inputs.Dropdown([0, 1, 2, 3, 4], default=2, label='选择tile_mode')
-    input_img = gr.inputs.Image(label='image', type='pil')
-
-    inputs = [input_img, input_model_name, input_tile_mode]
-    outputs = "image"
-    iface = gr.Interface(fn=greet,
-                         inputs=inputs,
-                         outputs=outputs,
-                         allow_screenshot=False,
-                         allow_flagging='never',
-                         examples=[['test-img.jpg', "up2x-latest-denoise2x.pth", 2]],
-                         article='[https://github.com/bilibili/ailab/tree/main/Real-CUGAN](https://github.com/bilibili/ailab/tree/main/Real-CUGAN)<br>'
-                                 '感谢b站开源的项目,图片过大会导致内存不足,所有我将图片裁剪小,想体验大图片的效果请自行前往上面的链接。<br>'
-                                 '修改bbb'
-                                 'The large image will lead to memory limit exceeded. So I crop and resize image. '
-                                 'If you want to experience the large image, please go to the link above.')
-    iface.launch()
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/index.py
DELETED
@@ -1,508 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2013 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-import hashlib
-import logging
-import os
-import shutil
-import subprocess
-import tempfile
-try:
-    from threading import Thread
-except ImportError:  # pragma: no cover
-    from dummy_threading import Thread
-
-from . import DistlibException
-from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
-                     urlparse, build_opener, string_types)
-from .util import zip_dir, ServerProxy
-
-logger = logging.getLogger(__name__)
-
-DEFAULT_INDEX = 'https://pypi.org/pypi'
-DEFAULT_REALM = 'pypi'
-
-class PackageIndex(object):
-    """
-    This class represents a package index compatible with PyPI, the Python
-    Package Index.
-    """
-
-    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
-
-    def __init__(self, url=None):
-        """
-        Initialise an instance.
-
-        :param url: The URL of the index. If not specified, the URL for PyPI is
-                    used.
-        """
-        self.url = url or DEFAULT_INDEX
-        self.read_configuration()
-        scheme, netloc, path, params, query, frag = urlparse(self.url)
-        if params or query or frag or scheme not in ('http', 'https'):
-            raise DistlibException('invalid repository: %s' % self.url)
-        self.password_handler = None
-        self.ssl_verifier = None
-        self.gpg = None
-        self.gpg_home = None
-        with open(os.devnull, 'w') as sink:
-            # Use gpg by default rather than gpg2, as gpg2 insists on
-            # prompting for passwords
-            for s in ('gpg', 'gpg2'):
-                try:
-                    rc = subprocess.check_call([s, '--version'], stdout=sink,
-                                               stderr=sink)
-                    if rc == 0:
-                        self.gpg = s
-                        break
-                except OSError:
-                    pass
-
-    def _get_pypirc_command(self):
-        """
-        Get the distutils command for interacting with PyPI configurations.
-        :return: the command.
-        """
-        from .util import _get_pypirc_command as cmd
-        return cmd()
-
-    def read_configuration(self):
-        """
-        Read the PyPI access configuration as supported by distutils. This populates
-        ``username``, ``password``, ``realm`` and ``url`` attributes from the
-        configuration.
-        """
-        from .util import _load_pypirc
-        cfg = _load_pypirc(self)
-        self.username = cfg.get('username')
-        self.password = cfg.get('password')
-        self.realm = cfg.get('realm', 'pypi')
-        self.url = cfg.get('repository', self.url)
-
-    def save_configuration(self):
-        """
-        Save the PyPI access configuration. You must have set ``username`` and
-        ``password`` attributes before calling this method.
-        """
-        self.check_credentials()
-        from .util import _store_pypirc
-        _store_pypirc(self)
-
-    def check_credentials(self):
-        """
-        Check that ``username`` and ``password`` have been set, and raise an
-        exception if not.
-        """
-        if self.username is None or self.password is None:
-            raise DistlibException('username and password must be set')
-        pm = HTTPPasswordMgr()
-        _, netloc, _, _, _, _ = urlparse(self.url)
-        pm.add_password(self.realm, netloc, self.username, self.password)
-        self.password_handler = HTTPBasicAuthHandler(pm)
-
-    def register(self, metadata):  # pragma: no cover
-        """
-        Register a distribution on PyPI, using the provided metadata.
-
-        :param metadata: A :class:`Metadata` instance defining at least a name
-                         and version number for the distribution to be
-                         registered.
-        :return: The HTTP response received from PyPI upon submission of the
-                 request.
-        """
-        self.check_credentials()
-        metadata.validate()
-        d = metadata.todict()
-        d[':action'] = 'verify'
-        request = self.encode_request(d.items(), [])
-        response = self.send_request(request)
-        d[':action'] = 'submit'
-        request = self.encode_request(d.items(), [])
-        return self.send_request(request)
-
-    def _reader(self, name, stream, outbuf):
-        """
-        Thread runner for reading lines of from a subprocess into a buffer.
-
-        :param name: The logical name of the stream (used for logging only).
-        :param stream: The stream to read from. This will typically a pipe
-                       connected to the output stream of a subprocess.
-        :param outbuf: The list to append the read lines to.
-        """
-        while True:
-            s = stream.readline()
-            if not s:
-                break
-            s = s.decode('utf-8').rstrip()
-            outbuf.append(s)
-            logger.debug('%s: %s' % (name, s))
-        stream.close()
-
-    def get_sign_command(self, filename, signer, sign_password, keystore=None):  # pragma: no cover
-        """
-        Return a suitable command for signing a file.
-
-        :param filename: The pathname to the file to be signed.
-        :param signer: The identifier of the signer of the file.
-        :param sign_password: The passphrase for the signer's
-                              private key used for signing.
-        :param keystore: The path to a directory which contains the keys
-                         used in verification. If not specified, the
-                         instance's ``gpg_home`` attribute is used instead.
-        :return: The signing command as a list suitable to be
-                 passed to :class:`subprocess.Popen`.
-        """
-        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
-        if keystore is None:
-            keystore = self.gpg_home
-        if keystore:
-            cmd.extend(['--homedir', keystore])
-        if sign_password is not None:
-            cmd.extend(['--batch', '--passphrase-fd', '0'])
-        td = tempfile.mkdtemp()
-        sf = os.path.join(td, os.path.basename(filename) + '.asc')
-        cmd.extend(['--detach-sign', '--armor', '--local-user',
-                    signer, '--output', sf, filename])
-        logger.debug('invoking: %s', ' '.join(cmd))
-        return cmd, sf
-
-    def run_command(self, cmd, input_data=None):
-        """
-        Run a command in a child process , passing it any input data specified.
-
-        :param cmd: The command to run.
-        :param input_data: If specified, this must be a byte string containing
-                           data to be sent to the child process.
-        :return: A tuple consisting of the subprocess' exit code, a list of
-                 lines read from the subprocess' ``stdout``, and a list of
-                 lines read from the subprocess' ``stderr``.
-        """
-        kwargs = {
-            'stdout': subprocess.PIPE,
-            'stderr': subprocess.PIPE,
-        }
-        if input_data is not None:
-            kwargs['stdin'] = subprocess.PIPE
-        stdout = []
-        stderr = []
-        p = subprocess.Popen(cmd, **kwargs)
-        # We don't use communicate() here because we may need to
-        # get clever with interacting with the command
-        t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
-        t1.start()
-        t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
-        t2.start()
-        if input_data is not None:
-            p.stdin.write(input_data)
-            p.stdin.close()
-
-        p.wait()
-        t1.join()
-        t2.join()
-        return p.returncode, stdout, stderr
-
-    def sign_file(self, filename, signer, sign_password, keystore=None):  # pragma: no cover
-        """
-        Sign a file.
-
-        :param filename: The pathname to the file to be signed.
-        :param signer: The identifier of the signer of the file.
-        :param sign_password: The passphrase for the signer's
-                              private key used for signing.
-        :param keystore: The path to a directory which contains the keys
-                         used in signing. If not specified, the instance's
-                         ``gpg_home`` attribute is used instead.
-        :return: The absolute pathname of the file where the signature is
-
stored.
|
221 |
-
"""
|
222 |
-
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
|
223 |
-
keystore)
|
224 |
-
rc, stdout, stderr = self.run_command(cmd,
|
225 |
-
sign_password.encode('utf-8'))
|
226 |
-
if rc != 0:
|
227 |
-
raise DistlibException('sign command failed with error '
|
228 |
-
'code %s' % rc)
|
229 |
-
return sig_file
|
230 |
-
|
231 |
-
def upload_file(self, metadata, filename, signer=None, sign_password=None,
|
232 |
-
filetype='sdist', pyversion='source', keystore=None):
|
233 |
-
"""
|
234 |
-
Upload a release file to the index.
|
235 |
-
|
236 |
-
:param metadata: A :class:`Metadata` instance defining at least a name
|
237 |
-
and version number for the file to be uploaded.
|
238 |
-
:param filename: The pathname of the file to be uploaded.
|
239 |
-
:param signer: The identifier of the signer of the file.
|
240 |
-
:param sign_password: The passphrase for the signer's
|
241 |
-
private key used for signing.
|
242 |
-
:param filetype: The type of the file being uploaded. This is the
|
243 |
-
distutils command which produced that file, e.g.
|
244 |
-
``sdist`` or ``bdist_wheel``.
|
245 |
-
:param pyversion: The version of Python which the release relates
|
246 |
-
to. For code compatible with any Python, this would
|
247 |
-
be ``source``, otherwise it would be e.g. ``3.2``.
|
248 |
-
:param keystore: The path to a directory which contains the keys
|
249 |
-
used in signing. If not specified, the instance's
|
250 |
-
``gpg_home`` attribute is used instead.
|
251 |
-
:return: The HTTP response received from PyPI upon submission of the
|
252 |
-
request.
|
253 |
-
"""
|
254 |
-
self.check_credentials()
|
255 |
-
if not os.path.exists(filename):
|
256 |
-
raise DistlibException('not found: %s' % filename)
|
257 |
-
metadata.validate()
|
258 |
-
d = metadata.todict()
|
259 |
-
sig_file = None
|
260 |
-
if signer:
|
261 |
-
if not self.gpg:
|
262 |
-
logger.warning('no signing program available - not signed')
|
263 |
-
else:
|
264 |
-
sig_file = self.sign_file(filename, signer, sign_password,
|
265 |
-
keystore)
|
266 |
-
with open(filename, 'rb') as f:
|
267 |
-
file_data = f.read()
|
268 |
-
md5_digest = hashlib.md5(file_data).hexdigest()
|
269 |
-
sha256_digest = hashlib.sha256(file_data).hexdigest()
|
270 |
-
d.update({
|
271 |
-
':action': 'file_upload',
|
272 |
-
'protocol_version': '1',
|
273 |
-
'filetype': filetype,
|
274 |
-
'pyversion': pyversion,
|
275 |
-
'md5_digest': md5_digest,
|
276 |
-
'sha256_digest': sha256_digest,
|
277 |
-
})
|
278 |
-
files = [('content', os.path.basename(filename), file_data)]
|
279 |
-
if sig_file:
|
280 |
-
with open(sig_file, 'rb') as f:
|
281 |
-
sig_data = f.read()
|
282 |
-
files.append(('gpg_signature', os.path.basename(sig_file),
|
283 |
-
sig_data))
|
284 |
-
shutil.rmtree(os.path.dirname(sig_file))
|
285 |
-
request = self.encode_request(d.items(), files)
|
286 |
-
return self.send_request(request)
|
287 |
-
|
288 |
-
def upload_documentation(self, metadata, doc_dir): # pragma: no cover
|
289 |
-
"""
|
290 |
-
Upload documentation to the index.
|
291 |
-
|
292 |
-
:param metadata: A :class:`Metadata` instance defining at least a name
|
293 |
-
and version number for the documentation to be
|
294 |
-
uploaded.
|
295 |
-
:param doc_dir: The pathname of the directory which contains the
|
296 |
-
documentation. This should be the directory that
|
297 |
-
contains the ``index.html`` for the documentation.
|
298 |
-
:return: The HTTP response received from PyPI upon submission of the
|
299 |
-
request.
|
300 |
-
"""
|
301 |
-
self.check_credentials()
|
302 |
-
if not os.path.isdir(doc_dir):
|
303 |
-
raise DistlibException('not a directory: %r' % doc_dir)
|
304 |
-
fn = os.path.join(doc_dir, 'index.html')
|
305 |
-
if not os.path.exists(fn):
|
306 |
-
raise DistlibException('not found: %r' % fn)
|
307 |
-
metadata.validate()
|
308 |
-
name, version = metadata.name, metadata.version
|
309 |
-
zip_data = zip_dir(doc_dir).getvalue()
|
310 |
-
fields = [(':action', 'doc_upload'),
|
311 |
-
('name', name), ('version', version)]
|
312 |
-
files = [('content', name, zip_data)]
|
313 |
-
request = self.encode_request(fields, files)
|
314 |
-
return self.send_request(request)
|
315 |
-
|
316 |
-
def get_verify_command(self, signature_filename, data_filename,
|
317 |
-
keystore=None):
|
318 |
-
"""
|
319 |
-
Return a suitable command for verifying a file.
|
320 |
-
|
321 |
-
:param signature_filename: The pathname to the file containing the
|
322 |
-
signature.
|
323 |
-
:param data_filename: The pathname to the file containing the
|
324 |
-
signed data.
|
325 |
-
:param keystore: The path to a directory which contains the keys
|
326 |
-
used in verification. If not specified, the
|
327 |
-
instance's ``gpg_home`` attribute is used instead.
|
328 |
-
:return: The verifying command as a list suitable to be
|
329 |
-
passed to :class:`subprocess.Popen`.
|
330 |
-
"""
|
331 |
-
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
|
332 |
-
if keystore is None:
|
333 |
-
keystore = self.gpg_home
|
334 |
-
if keystore:
|
335 |
-
cmd.extend(['--homedir', keystore])
|
336 |
-
cmd.extend(['--verify', signature_filename, data_filename])
|
337 |
-
logger.debug('invoking: %s', ' '.join(cmd))
|
338 |
-
return cmd
|
339 |
-
|
340 |
-
def verify_signature(self, signature_filename, data_filename,
|
341 |
-
keystore=None):
|
342 |
-
"""
|
343 |
-
Verify a signature for a file.
|
344 |
-
|
345 |
-
:param signature_filename: The pathname to the file containing the
|
346 |
-
signature.
|
347 |
-
:param data_filename: The pathname to the file containing the
|
348 |
-
signed data.
|
349 |
-
:param keystore: The path to a directory which contains the keys
|
350 |
-
used in verification. If not specified, the
|
351 |
-
instance's ``gpg_home`` attribute is used instead.
|
352 |
-
:return: True if the signature was verified, else False.
|
353 |
-
"""
|
354 |
-
if not self.gpg:
|
355 |
-
raise DistlibException('verification unavailable because gpg '
|
356 |
-
'unavailable')
|
357 |
-
cmd = self.get_verify_command(signature_filename, data_filename,
|
358 |
-
keystore)
|
359 |
-
rc, stdout, stderr = self.run_command(cmd)
|
360 |
-
if rc not in (0, 1):
|
361 |
-
raise DistlibException('verify command failed with error '
|
362 |
-
'code %s' % rc)
|
363 |
-
return rc == 0
|
364 |
-
|
365 |
-
def download_file(self, url, destfile, digest=None, reporthook=None):
|
366 |
-
"""
|
367 |
-
This is a convenience method for downloading a file from an URL.
|
368 |
-
Normally, this will be a file from the index, though currently
|
369 |
-
no check is made for this (i.e. a file can be downloaded from
|
370 |
-
anywhere).
|
371 |
-
|
372 |
-
The method is just like the :func:`urlretrieve` function in the
|
373 |
-
standard library, except that it allows digest computation to be
|
374 |
-
done during download and checking that the downloaded data
|
375 |
-
matched any expected value.
|
376 |
-
|
377 |
-
:param url: The URL of the file to be downloaded (assumed to be
|
378 |
-
available via an HTTP GET request).
|
379 |
-
:param destfile: The pathname where the downloaded file is to be
|
380 |
-
saved.
|
381 |
-
:param digest: If specified, this must be a (hasher, value)
|
382 |
-
tuple, where hasher is the algorithm used (e.g.
|
383 |
-
``'md5'``) and ``value`` is the expected value.
|
384 |
-
:param reporthook: The same as for :func:`urlretrieve` in the
|
385 |
-
standard library.
|
386 |
-
"""
|
387 |
-
if digest is None:
|
388 |
-
digester = None
|
389 |
-
logger.debug('No digest specified')
|
390 |
-
else:
|
391 |
-
if isinstance(digest, (list, tuple)):
|
392 |
-
hasher, digest = digest
|
393 |
-
else:
|
394 |
-
hasher = 'md5'
|
395 |
-
digester = getattr(hashlib, hasher)()
|
396 |
-
logger.debug('Digest specified: %s' % digest)
|
397 |
-
# The following code is equivalent to urlretrieve.
|
398 |
-
# We need to do it this way so that we can compute the
|
399 |
-
# digest of the file as we go.
|
400 |
-
with open(destfile, 'wb') as dfp:
|
401 |
-
# addinfourl is not a context manager on 2.x
|
402 |
-
# so we have to use try/finally
|
403 |
-
sfp = self.send_request(Request(url))
|
404 |
-
try:
|
405 |
-
headers = sfp.info()
|
406 |
-
blocksize = 8192
|
407 |
-
size = -1
|
408 |
-
read = 0
|
409 |
-
blocknum = 0
|
410 |
-
if "content-length" in headers:
|
411 |
-
size = int(headers["Content-Length"])
|
412 |
-
if reporthook:
|
413 |
-
reporthook(blocknum, blocksize, size)
|
414 |
-
while True:
|
415 |
-
block = sfp.read(blocksize)
|
416 |
-
if not block:
|
417 |
-
break
|
418 |
-
read += len(block)
|
419 |
-
dfp.write(block)
|
420 |
-
if digester:
|
421 |
-
digester.update(block)
|
422 |
-
blocknum += 1
|
423 |
-
if reporthook:
|
424 |
-
reporthook(blocknum, blocksize, size)
|
425 |
-
finally:
|
426 |
-
sfp.close()
|
427 |
-
|
428 |
-
# check that we got the whole file, if we can
|
429 |
-
if size >= 0 and read < size:
|
430 |
-
raise DistlibException(
|
431 |
-
'retrieval incomplete: got only %d out of %d bytes'
|
432 |
-
% (read, size))
|
433 |
-
# if we have a digest, it must match.
|
434 |
-
if digester:
|
435 |
-
actual = digester.hexdigest()
|
436 |
-
if digest != actual:
|
437 |
-
raise DistlibException('%s digest mismatch for %s: expected '
|
438 |
-
'%s, got %s' % (hasher, destfile,
|
439 |
-
digest, actual))
|
440 |
-
logger.debug('Digest verified: %s', digest)
|
441 |
-
|
442 |
-
def send_request(self, req):
|
443 |
-
"""
|
444 |
-
Send a standard library :class:`Request` to PyPI and return its
|
445 |
-
response.
|
446 |
-
|
447 |
-
:param req: The request to send.
|
448 |
-
:return: The HTTP response from PyPI (a standard library HTTPResponse).
|
449 |
-
"""
|
450 |
-
handlers = []
|
451 |
-
if self.password_handler:
|
452 |
-
handlers.append(self.password_handler)
|
453 |
-
if self.ssl_verifier:
|
454 |
-
handlers.append(self.ssl_verifier)
|
455 |
-
opener = build_opener(*handlers)
|
456 |
-
return opener.open(req)
|
457 |
-
|
458 |
-
def encode_request(self, fields, files):
|
459 |
-
"""
|
460 |
-
Encode fields and files for posting to an HTTP server.
|
461 |
-
|
462 |
-
:param fields: The fields to send as a list of (fieldname, value)
|
463 |
-
tuples.
|
464 |
-
:param files: The files to send as a list of (fieldname, filename,
|
465 |
-
file_bytes) tuple.
|
466 |
-
"""
|
467 |
-
# Adapted from packaging, which in turn was adapted from
|
468 |
-
# http://code.activestate.com/recipes/146306
|
469 |
-
|
470 |
-
parts = []
|
471 |
-
boundary = self.boundary
|
472 |
-
for k, values in fields:
|
473 |
-
if not isinstance(values, (list, tuple)):
|
474 |
-
values = [values]
|
475 |
-
|
476 |
-
for v in values:
|
477 |
-
parts.extend((
|
478 |
-
b'--' + boundary,
|
479 |
-
('Content-Disposition: form-data; name="%s"' %
|
480 |
-
k).encode('utf-8'),
|
481 |
-
b'',
|
482 |
-
v.encode('utf-8')))
|
483 |
-
for key, filename, value in files:
|
484 |
-
parts.extend((
|
485 |
-
b'--' + boundary,
|
486 |
-
('Content-Disposition: form-data; name="%s"; filename="%s"' %
|
487 |
-
(key, filename)).encode('utf-8'),
|
488 |
-
b'',
|
489 |
-
value))
|
490 |
-
|
491 |
-
parts.extend((b'--' + boundary + b'--', b''))
|
492 |
-
|
493 |
-
body = b'\r\n'.join(parts)
|
494 |
-
ct = b'multipart/form-data; boundary=' + boundary
|
495 |
-
headers = {
|
496 |
-
'Content-type': ct,
|
497 |
-
'Content-length': str(len(body))
|
498 |
-
}
|
499 |
-
return Request(self.url, body, headers)
|
500 |
-
|
501 |
-
def search(self, terms, operator=None): # pragma: no cover
|
502 |
-
if isinstance(terms, string_types):
|
503 |
-
terms = {'name': terms}
|
504 |
-
rpc_proxy = ServerProxy(self.url, timeout=3.0)
|
505 |
-
try:
|
506 |
-
return rpc_proxy.search(terms, operator or 'and')
|
507 |
-
finally:
|
508 |
-
rpc_proxy('close')()
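
For orientation, a minimal usage sketch for the PackageIndex class deleted above: the index URL, credentials, and file path below are invented placeholders, and it assumes distlib's Metadata accepts attribute-style assignment for name and version.

# Hypothetical sketch only -- URL, credentials and paths are illustrative.
from distlib.index import PackageIndex
from distlib.metadata import Metadata

index = PackageIndex('https://test.pypi.org/legacy/')
index.username = 'alice'     # normally populated by read_configuration()
index.password = 'secret'
index.check_credentials()    # builds the HTTPBasicAuthHandler shown above

md = Metadata()
md.name = 'example-pkg'      # assumes attribute-style metadata assignment
md.version = '0.1.0'
response = index.upload_file(md, 'dist/example_pkg-0.1.0.tar.gz',
                             filetype='sdist', pyversion='source')
print(response.code)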
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal256.py
DELETED
@@ -1,338 +0,0 @@
"""
    pygments.formatters.terminal256
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Formatter for 256-color terminal output with ANSI sequences.

    RGB-to-XTERM color conversion routines adapted from xterm256-conv
    tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
    by Wolfgang Frisch.

    Formatter version 1.

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

# TODO:
#  - Options to map style's bold/underline/italic/border attributes
#    to some ANSI attributes (something like 'italic=underline')
#  - An option to output "style RGB to xterm RGB/index" conversion table
#  - An option to indicate that we are running in "reverse background"
#    xterm. This means that default colors are white-on-black, not
#    black-on-white, so colors like "white background" need to be converted
#    to "white background, black foreground", etc...

from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.console import codes
from pip._vendor.pygments.style import ansicolors


__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']


class EscapeSequence:
    def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):
        self.fg = fg
        self.bg = bg
        self.bold = bold
        self.underline = underline
        self.italic = italic

    def escape(self, attrs):
        if len(attrs):
            return "\x1b[" + ";".join(attrs) + "m"
        return ""

    def color_string(self):
        attrs = []
        if self.fg is not None:
            if self.fg in ansicolors:
                esc = codes[self.fg.replace('ansi', '')]
                if ';01m' in esc:
                    self.bold = True
                # extract fg color code.
                attrs.append(esc[2:4])
            else:
                attrs.extend(("38", "5", "%i" % self.fg))
        if self.bg is not None:
            if self.bg in ansicolors:
                esc = codes[self.bg.replace('ansi', '')]
                # extract fg color code, add 10 for bg.
                attrs.append(str(int(esc[2:4])+10))
            else:
                attrs.extend(("48", "5", "%i" % self.bg))
        if self.bold:
            attrs.append("01")
        if self.underline:
            attrs.append("04")
        if self.italic:
            attrs.append("03")
        return self.escape(attrs)

    def true_color_string(self):
        attrs = []
        if self.fg:
            attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
        if self.bg:
            attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
        if self.bold:
            attrs.append("01")
        if self.underline:
            attrs.append("04")
        if self.italic:
            attrs.append("03")
        return self.escape(attrs)

    def reset_string(self):
        attrs = []
        if self.fg is not None:
            attrs.append("39")
        if self.bg is not None:
            attrs.append("49")
        if self.bold or self.underline or self.italic:
            attrs.append("00")
        return self.escape(attrs)


class Terminal256Formatter(Formatter):
    """
    Format tokens with ANSI color sequences, for output in a 256-color
    terminal or console. Like in `TerminalFormatter` color sequences
    are terminated at newlines, so that paging the output works correctly.

    The formatter takes colors from a style defined by the `style` option
    and converts them to nearest ANSI 256-color escape sequences. Bold and
    underline attributes from the style are preserved (and displayed).

    .. versionadded:: 0.9

    .. versionchanged:: 2.2
       If the used style defines foreground colors in the form ``#ansi*``, then
       `Terminal256Formatter` will map these to non extended foreground color.
       See :ref:`AnsiTerminalStyle` for more information.

    .. versionchanged:: 2.4
       The ANSI color names have been updated with names that are easier to
       understand and align with colornames of other projects and terminals.
       See :ref:`this table <new-ansi-color-names>` for more information.


    Options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `linenos`
        Set to ``True`` to have line numbers on the terminal output as well
        (default: ``False`` = no line numbers).
    """
    name = 'Terminal256'
    aliases = ['terminal256', 'console256', '256']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)

        self.xterm_colors = []
        self.best_match = {}
        self.style_string = {}

        self.usebold = 'nobold' not in options
        self.useunderline = 'nounderline' not in options
        self.useitalic = 'noitalic' not in options

        self._build_color_table()  # build an RGB-to-256 color conversion table
        self._setup_styles()  # convert selected style's colors to term. colors

        self.linenos = options.get('linenos', False)
        self._lineno = 0

    def _build_color_table(self):
        # colors 0..15: 16 basic colors

        self.xterm_colors.append((0x00, 0x00, 0x00))  # 0
        self.xterm_colors.append((0xcd, 0x00, 0x00))  # 1
        self.xterm_colors.append((0x00, 0xcd, 0x00))  # 2
        self.xterm_colors.append((0xcd, 0xcd, 0x00))  # 3
        self.xterm_colors.append((0x00, 0x00, 0xee))  # 4
        self.xterm_colors.append((0xcd, 0x00, 0xcd))  # 5
        self.xterm_colors.append((0x00, 0xcd, 0xcd))  # 6
        self.xterm_colors.append((0xe5, 0xe5, 0xe5))  # 7
        self.xterm_colors.append((0x7f, 0x7f, 0x7f))  # 8
        self.xterm_colors.append((0xff, 0x00, 0x00))  # 9
        self.xterm_colors.append((0x00, 0xff, 0x00))  # 10
        self.xterm_colors.append((0xff, 0xff, 0x00))  # 11
        self.xterm_colors.append((0x5c, 0x5c, 0xff))  # 12
        self.xterm_colors.append((0xff, 0x00, 0xff))  # 13
        self.xterm_colors.append((0x00, 0xff, 0xff))  # 14
        self.xterm_colors.append((0xff, 0xff, 0xff))  # 15

        # colors 16..232: the 6x6x6 color cube

        valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)

        for i in range(217):
            r = valuerange[(i // 36) % 6]
            g = valuerange[(i // 6) % 6]
            b = valuerange[i % 6]
            self.xterm_colors.append((r, g, b))

        # colors 233..253: grayscale

        for i in range(1, 22):
            v = 8 + i * 10
            self.xterm_colors.append((v, v, v))

    def _closest_color(self, r, g, b):
        distance = 257*257*3  # "infinity" (>distance from #000000 to #ffffff)
        match = 0

        for i in range(0, 254):
            values = self.xterm_colors[i]

            rd = r - values[0]
            gd = g - values[1]
            bd = b - values[2]
            d = rd*rd + gd*gd + bd*bd

            if d < distance:
                match = i
                distance = d
        return match

    def _color_index(self, color):
        index = self.best_match.get(color, None)
        if color in ansicolors:
            # strip the `ansi/#ansi` part and look up code
            index = color
            self.best_match[color] = index
        if index is None:
            try:
                rgb = int(str(color), 16)
            except ValueError:
                rgb = 0

            r = (rgb >> 16) & 0xff
            g = (rgb >> 8) & 0xff
            b = rgb & 0xff
            index = self._closest_color(r, g, b)
            self.best_match[color] = index
        return index

    def _setup_styles(self):
        for ttype, ndef in self.style:
            escape = EscapeSequence()
            # get foreground from ansicolor if set
            if ndef['ansicolor']:
                escape.fg = self._color_index(ndef['ansicolor'])
            elif ndef['color']:
                escape.fg = self._color_index(ndef['color'])
            if ndef['bgansicolor']:
                escape.bg = self._color_index(ndef['bgansicolor'])
            elif ndef['bgcolor']:
                escape.bg = self._color_index(ndef['bgcolor'])
            if self.usebold and ndef['bold']:
                escape.bold = True
            if self.useunderline and ndef['underline']:
                escape.underline = True
            if self.useitalic and ndef['italic']:
                escape.italic = True
            self.style_string[str(ttype)] = (escape.color_string(),
                                             escape.reset_string())

    def _write_lineno(self, outfile):
        self._lineno += 1
        outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))

    def format(self, tokensource, outfile):
        return Formatter.format(self, tokensource, outfile)

    def format_unencoded(self, tokensource, outfile):
        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            not_found = True
            while ttype and not_found:
                try:
                    # outfile.write( "<" + str(ttype) + ">" )
                    on, off = self.style_string[str(ttype)]

                    # Like TerminalFormatter, add "reset colors" escape sequence
                    # on newline.
                    spl = value.split('\n')
                    for line in spl[:-1]:
                        if line:
                            outfile.write(on + line + off)
                        if self.linenos:
                            self._write_lineno(outfile)
                        else:
                            outfile.write('\n')

                    if spl[-1]:
                        outfile.write(on + spl[-1] + off)

                    not_found = False
                    # outfile.write( '#' + str(ttype) + '#' )

                except KeyError:
                    # ottype = ttype
                    ttype = ttype.parent
                    # outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )

            if not_found:
                outfile.write(value)

        if self.linenos:
            outfile.write("\n")


class TerminalTrueColorFormatter(Terminal256Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a true-color
    terminal or console. Like in `TerminalFormatter` color sequences
    are terminated at newlines, so that paging the output works correctly.

    .. versionadded:: 2.1

    Options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).
    """
    name = 'TerminalTrueColor'
    aliases = ['terminal16m', 'console16m', '16m']
    filenames = []

    def _build_color_table(self):
        pass

    def _color_tuple(self, color):
        try:
            rgb = int(str(color), 16)
        except ValueError:
            return None
        r = (rgb >> 16) & 0xff
        g = (rgb >> 8) & 0xff
        b = rgb & 0xff
        return (r, g, b)

    def _setup_styles(self):
        for ttype, ndef in self.style:
            escape = EscapeSequence()
            if ndef['color']:
                escape.fg = self._color_tuple(ndef['color'])
            if ndef['bgcolor']:
                escape.bg = self._color_tuple(ndef['bgcolor'])
            if self.usebold and ndef['bold']:
                escape.bold = True
            if self.useunderline and ndef['underline']:
                escape.underline = True
            if self.useitalic and ndef['italic']:
                escape.italic = True
            self.style_string[str(ttype)] = (escape.true_color_string(),
                                             escape.reset_string())
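
As a quick illustration of the formatter deleted above, this is how it is typically driven through the public pygments API (assuming a standalone pygments install rather than the pip-vendored copy):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter

code = "def add(a, b):\n    return a + b\n"
# linenos=True exercises the _write_lineno() path shown above.
print(highlight(code, PythonLexer(),
                Terminal256Formatter(style='monokai', linenos=True)))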
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/common.py
DELETED
@@ -1,424 +0,0 @@
# common.py
from .core import *
from .helpers import delimited_list, any_open_tag, any_close_tag
from datetime import datetime


# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
    """Here are some common low-level expressions that may be useful in
    jump-starting parser development:

     - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
       :class:`scientific notation<sci_real>`)
     - common :class:`programming identifiers<identifier>`
     - network addresses (:class:`MAC<mac_address>`,
       :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
     - ISO8601 :class:`dates<iso8601_date>` and
       :class:`datetime<iso8601_datetime>`
     - :class:`UUID<uuid>`
     - :class:`comma-separated list<comma_separated_list>`
     - :class:`url`

    Parse actions:

     - :class:`convertToInteger`
     - :class:`convertToFloat`
     - :class:`convertToDate`
     - :class:`convertToDatetime`
     - :class:`stripHTMLTags`
     - :class:`upcaseTokens`
     - :class:`downcaseTokens`

    Example::

        pyparsing_common.number.runTests('''
            # any int or real number, returned as the appropriate type
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.fnumber.runTests('''
            # any int or real number, returned as float
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.hex_integer.runTests('''
            # hex numbers
            100
            FF
            ''')

        pyparsing_common.fraction.runTests('''
            # fractions
            1/2
            -3/4
            ''')

        pyparsing_common.mixed_integer.runTests('''
            # mixed fractions
            1
            1/2
            -3/4
            1-3/4
            ''')

        import uuid
        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
        pyparsing_common.uuid.runTests('''
            # uuid
            12345678-1234-5678-1234-567812345678
            ''')

    prints::

        # any int or real number, returned as the appropriate type
        100
        [100]

        -100
        [-100]

        +100
        [100]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # any int or real number, returned as float
        100
        [100.0]

        -100
        [-100.0]

        +100
        [100.0]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # hex numbers
        100
        [256]

        FF
        [255]

        # fractions
        1/2
        [0.5]

        -3/4
        [-0.75]

        # mixed fractions
        1
        [1]

        1/2
        [0.5]

        -3/4
        [-0.75]

        1-3/4
        [1.75]

        # uuid
        12345678-1234-5678-1234-567812345678
        [UUID('12345678-1234-5678-1234-567812345678')]
    """

    convert_to_integer = token_map(int)
    """
    Parse action for converting parsed integers to Python int
    """

    convert_to_float = token_map(float)
    """
    Parse action for converting parsed numbers to Python float
    """

    integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
    """expression that parses an unsigned integer, returns an int"""

    hex_integer = (
        Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
    )
    """expression that parses a hexadecimal integer, returns an int"""

    signed_integer = (
        Regex(r"[+-]?\d+")
        .set_name("signed integer")
        .set_parse_action(convert_to_integer)
    )
    """expression that parses an integer with optional leading sign, returns an int"""

    fraction = (
        signed_integer().set_parse_action(convert_to_float)
        + "/"
        + signed_integer().set_parse_action(convert_to_float)
    ).set_name("fraction")
    """fractional expression of an integer divided by an integer, returns a float"""
    fraction.add_parse_action(lambda tt: tt[0] / tt[-1])

    mixed_integer = (
        fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
    ).set_name("fraction or mixed integer-fraction")
    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
    mixed_integer.add_parse_action(sum)

    real = (
        Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
        .set_name("real number")
        .set_parse_action(convert_to_float)
    )
    """expression that parses a floating point number and returns a float"""

    sci_real = (
        Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
        .set_name("real number with scientific notation")
        .set_parse_action(convert_to_float)
    )
    """expression that parses a floating point number with optional
    scientific notation and returns a float"""

    # streamlining this expression makes the docs nicer-looking
    number = (sci_real | real | signed_integer).setName("number").streamline()
    """any numeric expression, returns the corresponding Python type"""

    fnumber = (
        Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
        .set_name("fnumber")
        .set_parse_action(convert_to_float)
    )
    """any int or real number, returned as float"""

    identifier = Word(identchars, identbodychars).set_name("identifier")
    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""

    ipv4_address = Regex(
        r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
    ).set_name("IPv4 address")
    "IPv4 address (``0.0.0.0 - 255.255.255.255``)"

    _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
    _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
        "full IPv6 address"
    )
    _short_ipv6_address = (
        Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
        + "::"
        + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
    ).set_name("short IPv6 address")
    _short_ipv6_address.add_condition(
        lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
    )
    _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
    ipv6_address = Combine(
        (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
            "IPv6 address"
        )
    ).set_name("IPv6 address")
    "IPv6 address (long, short, or mixed form)"

    mac_address = Regex(
        r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
    ).set_name("MAC address")
    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"

    @staticmethod
    def convert_to_date(fmt: str = "%Y-%m-%d"):
        """
        Helper to create a parse action for converting parsed date string to Python datetime.date

        Params -
         - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)

        Example::

            date_expr = pyparsing_common.iso8601_date.copy()
            date_expr.setParseAction(pyparsing_common.convertToDate())
            print(date_expr.parseString("1999-12-31"))

        prints::

            [datetime.date(1999, 12, 31)]
        """

        def cvt_fn(ss, ll, tt):
            try:
                return datetime.strptime(tt[0], fmt).date()
            except ValueError as ve:
                raise ParseException(ss, ll, str(ve))

        return cvt_fn

    @staticmethod
    def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
        """Helper to create a parse action for converting parsed
        datetime string to Python datetime.datetime

        Params -
         - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)

        Example::

            dt_expr = pyparsing_common.iso8601_datetime.copy()
            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
            print(dt_expr.parseString("1999-12-31T23:59:59.999"))

        prints::

            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
        """

        def cvt_fn(s, l, t):
            try:
                return datetime.strptime(t[0], fmt)
            except ValueError as ve:
                raise ParseException(s, l, str(ve))

        return cvt_fn

    iso8601_date = Regex(
        r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
    ).set_name("ISO8601 date")
    "ISO8601 date (``yyyy-mm-dd``)"

    iso8601_datetime = Regex(
        r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
    ).set_name("ISO8601 datetime")
    "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"

    uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
    "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"

    _html_stripper = any_open_tag.suppress() | any_close_tag.suppress()

    @staticmethod
    def strip_html_tags(s: str, l: int, tokens: ParseResults):
        """Parse action to remove HTML tags from web page HTML source

        Example::

            # strip HTML links from normal text
            text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
            td, td_end = makeHTMLTags("TD")
            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
            print(table_text.parseString(text).body)

        Prints::

            More info at the pyparsing wiki page
        """
        return pyparsing_common._html_stripper.transform_string(tokens[0])

    _commasepitem = (
        Combine(
            OneOrMore(
                ~Literal(",")
                + ~LineEnd()
                + Word(printables, exclude_chars=",")
                + Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
            )
        )
        .streamline()
        .set_name("commaItem")
    )
    comma_separated_list = delimited_list(
        Opt(quoted_string.copy() | _commasepitem, default="")
    ).set_name("comma separated list")
    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""

    upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
    """Parse action to convert tokens to upper case."""

    downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
    """Parse action to convert tokens to lower case."""

    # fmt: off
    url = Regex(
        # https://mathiasbynens.be/demo/url-regex
        # https://gist.github.com/dperini/729294
        r"^" +
        # protocol identifier (optional)
        # short syntax // still required
        r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
        # user:pass BasicAuth (optional)
        r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
        r"(?P<host>" +
        # IP address exclusion
        # private & local networks
        r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
        r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
        r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
        # IP address dotted notation octets
        # excludes loopback network 0.0.0.0
        # excludes reserved space >= 224.0.0.0
        # excludes network & broadcast addresses
        # (first & last IP address of each class)
        r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
        r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
        r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
        r"|" +
        # host & domain names, may end with dot
        # can be replaced by a shortest alternative
        # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
        r"(?:" +
        r"(?:" +
        r"[a-z0-9\u00a1-\uffff]" +
        r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
        r")?" +
        r"[a-z0-9\u00a1-\uffff]\." +
        r")+" +
        # TLD identifier name, may end with dot
        r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
        r")" +
        # port number (optional)
        r"(:(?P<port>\d{2,5}))?" +
        # resource path (optional)
        r"(?P<path>\/[^?# ]*)?" +
        # query string (optional)
        r"(\?(?P<query>[^#]*))?" +
        # fragment (optional)
        r"(#(?P<fragment>\S*))?" +
        r"$"
    ).set_name("url")
    # fmt: on

    # pre-PEP8 compatibility names
    convertToInteger = convert_to_integer
    convertToFloat = convert_to_float
    convertToDate = convert_to_date
    convertToDatetime = convert_to_datetime
    stripHTMLTags = strip_html_tags
    upcaseTokens = upcase_tokens
    downcaseTokens = downcase_tokens


_builtin_exprs = [
    v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
]
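
For reference, a few of the expressions above in action, assuming standalone pyparsing 3.x (where the snake_case API shown in this file is available):

from pyparsing import pyparsing_common as ppc

print(ppc.number.parse_string("6.02e23")[0])         # -> 6.02e+23 (float)
print(ppc.fnumber.parse_string("100")[0])            # -> 100.0 (always float)
print(ppc.ipv4_address.parse_string("192.168.0.1"))  # -> ['192.168.0.1']
print(ppc.uuid.parse_string("12345678-1234-5678-1234-567812345678"))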
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/backbone/build.py
DELETED
@@ -1,33 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.layers import ShapeSpec
from detectron2.utils.registry import Registry

from .backbone import Backbone

BACKBONE_REGISTRY = Registry("BACKBONE")
BACKBONE_REGISTRY.__doc__ = """
Registry for backbones, which extract feature maps from images

The registered object must be a callable that accepts two arguments:

1. A :class:`detectron2.config.CfgNode`
2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification.

Registered object must return instance of :class:`Backbone`.
"""


def build_backbone(cfg, input_shape=None):
    """
    Build a backbone from `cfg.MODEL.BACKBONE.NAME`.

    Returns:
        an instance of :class:`Backbone`
    """
    if input_shape is None:
        input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))

    backbone_name = cfg.MODEL.BACKBONE.NAME
    backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)
    assert isinstance(backbone, Backbone)
    return backbone
spaces/Axesys/Private-WebUI/README.md
DELETED
@@ -1,14 +0,0 @@
---
title: Waifu AI
emoji: 💻
colorFrom: pink
colorTo: purple
sdk: gradio
sdk_version: 3.12.0
app_file: app.py
pinned: false
license: openrail
duplicated_from: Axesys/Waifu-AI-WebUI
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Benson/text-generation/Examples/Coche Extremo Simulador De Conduccin Mod Apk Hack Descargar Para Pc.md
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Simulador de conducción de coche extremo Mod APK Hack Descargar para PC</h1>
|
3 |
-
<p>¿Te encanta conducir coches rápidos y realizar acrobacias increíbles? ¿Quieres experimentar la emoción de conducir en un entorno realista de mundo abierto? Si es así, entonces usted debe probar Extreme Car Driving Simulator, uno de los mejores juegos de conducción de coches simulador para Android. Y si usted quiere hacer el juego aún más divertido y emocionante, usted debe descargar la versión hack apk mod para PC, que le da dinero ilimitado, coches, y otros beneficios. En este artículo, le diremos todo lo que necesita saber sobre Extreme Car Driving Simulator, por qué debe descargar el hack apk mod, y cómo instalarlo en su PC.</p>
|
4 |
-
<h2>¿Qué es Extreme Car Driving Simulator? </h2>
|
5 |
-
<p>Extreme Car Driving Simulator es un simulador de conducción de coches en 3D desarrollado por AxesInMotion Racing. Está disponible de forma gratuita en Google Play Store y tiene más de 100 millones de descargas. El juego te permite conducir varios tipos de coches, desde coches deportivos hasta SUV, en una gran ciudad de mundo abierto. Puede conducir libremente, seguir las reglas de tráfico, o romperlas y causar caos. También puede realizar acrobacias, derivas, saltos y accidentes con física realista y daños en el coche. El juego tiene diferentes modos, como el modo libre, el modo de punto de control, el modo de tráfico y el modo fantasma. También puede personalizar sus coches con diferentes colores, ruedas y vinilos. </p>
|
6 |
-
<h2>coche extremo simulador de conducción mod apk hack descargar para pc</h2><br /><p><b><b>Download File</b> >>> <a href="https://bltlly.com/2v6LEr">https://bltlly.com/2v6LEr</a></b></p><br /><br />
|
7 |
-
<h3>Características del simulador de conducción de automóviles extremos</h3>
|
8 |
-
<p>Extreme Car Driving Simulator tiene muchas características que lo convierten en uno de los mejores juegos de simulador de conducción de automóviles para Android. Aquí están algunos de ellos:</p>
|
9 |
-
<h4>Unidad con tráfico</h4>
|
10 |
-
<p>Puedes elegir conducir con o sin tráfico en el juego. Conducir con tráfico añade más realismo y desafío al juego, ya que tienes que evitar colisiones y seguir las reglas de tráfico. También puede tocar la bocina, encender las luces y usar indicadores para comunicarse con otros conductores. </p>
|
11 |
-
<h4>HUD real completo</h4>
|
12 |
-
|
13 |
-
<h4>Simulación de ABS, TC y ESP</h4>
|
14 |
-
<p>El juego simula el sistema de frenos antibloqueo (ABS), el control de tracción (TC) y el programa de estabilidad electrónica (ESP) de los coches. También puede desactivarlos si desea tener más control sobre el comportamiento de su automóvil. </p>
|
15 |
-
<h4>Explora un entorno de mundo abierto detallado</h4>
|
16 |
-
<p>El juego tiene una gran ciudad de mundo abierto que se puede explorar libremente. La ciudad tiene diferentes áreas, como el centro, el aeropuerto, la zona industrial y el campo. La ciudad también tiene un clima dinámico y un ciclo día-noche que afectan las condiciones de conducción. </p>
|
17 |
-
<h4>Daños realistas en el coche</h4>
|
18 |
-
<p>El juego tiene daños realistas coche que muestra el impacto de sus accidentes y colisiones. Puede ver las partes del cuerpo de su automóvil abolladas, rayadas o cayéndose. También puede reparar su automóvil presionando un botón o visitando un garaje. </p>
|
19 |
-
<h4>Física precisa</h4>
|
20 |
-
<p>El juego tiene la física precisa que hacen la experiencia de conducción más realista y divertido. Puede sentir el peso, la velocidad y la inercia de su automóvil mientras conduce. También puedes realizar acrobacias, derrapes, saltos y volteretas con tu auto usando rampas, <h4>Controla tu auto con diferentes opciones</h4>
|
21 |
-
<p>El juego te da diferentes opciones para controlar tu coche, como inclinación, botones o volante. También puede ajustar la sensibilidad y la retroalimentación de los controles para adaptarse a sus preferencias. También puede cambiar el modo de engranaje de automático a manual. </p>
|
22 |
-
<h3> ¿Por qué descargar Extreme Car Driving Simulator mod apk hack? </h3>
|
23 |
-
<p>Extreme Car Driving Simulator es un juego divertido y adictivo, pero también puede ser frustrante y consume mucho tiempo si desea desbloquear todos los coches y características. Es por eso que usted debe descargar la versión mod apk hack para PC, que le da muchas ventajas sobre el juego original. Aquí están algunos de ellos:</p>
|
24 |
-
<p></p>
|
25 |
-
<h4>Dinero y coches ilimitados</h4>
|
26 |
-
|
27 |
-
<h4>No se requieren anuncios ni root</h4>
|
28 |
-
<p>El mod apk hack versión también elimina todos los anuncios molestos que interrumpen su juego. Usted puede jugar el juego sin ninguna distracción o interrupciones. Además, usted no necesita rootear su dispositivo para instalar la versión mod apk hack. Puedes simplemente descargarlo e instalarlo en tu PC usando un emulador de Android. </p>
|
29 |
-
<h3>Cómo descargar e instalar Extreme Car Driving Simulator mod apk hack para PC? </h3>
|
30 |
-
<p>Si desea descargar e instalar Extreme Car Driving Simulator mod apk hack para PC, debe seguir estos sencillos pasos:</p>
|
31 |
-
<h4>Paso 1: Descargar un emulador de Android</h4>
|
32 |
-
<p>Un emulador de Android es un software que le permite ejecutar aplicaciones y juegos de Android en su PC. Hay muchos emuladores de Android disponibles en línea, como BlueStacks, NoxPlayer, MEmu, etc. Puede elegir cualquiera de ellos y descargarlo desde su sitio web oficial. Luego, instálalo en tu PC siguiendo las instrucciones. </p>
|
33 |
-
<h4>Paso 2: Descargar el archivo apk mod de una fuente de confianza</h4>
|
34 |
-
<p>El siguiente paso es descargar el archivo apk mod de Extreme Car Driving Simulator de una fuente de confianza. Puede buscarlo en Google o utilizar el enlace que se proporciona a continuación. Asegúrese de descargar la última versión del archivo apk mod que es compatible con su emulador. </p>
|
35 |
-
<p><a href="">Descargar Extreme Car Driving Simulator mod apk hack</a></p>
|
36 |
-
<h4>Paso 3: Instalar el archivo apk mod en el emulador</h4>
|
37 |
-
<p>Después de descargar el archivo apk mod, es necesario instalarlo en el emulador. Puede hacer esto arrastrando y soltando el archivo en la ventana del emulador o navegando y seleccionándolo desde la carpeta de su PC. El emulador instalará automáticamente el archivo mod apk en tu dispositivo virtual. </p>
|
38 |
-
<h4>Paso 4: Iniciar el juego y disfrutar de</h4>
|
39 |
-
|
40 |
-
<h2>Conclusión</h2>
|
41 |
-
<p>Extreme Car Driving Simulator es uno de los mejores juegos de simulador de conducción de coches para Android que te permite conducir varios tipos de coches en un entorno realista de mundo abierto. También puede descargar la versión mod apk hack para PC que le da dinero ilimitado, coches, y no hay anuncios. Solo tienes que seguir los pasos mencionados anteriormente para descargarlo e instalarlo en tu PC usando un emulador de Android. Entonces, ¿qué estás esperando? Descargar Extreme Car Driving Simulator mod apk hack para PC hoy y divertirse conduciendo coches rápidos y realizar acrobacias increíbles. </p>
|
42 |
-
Q: ¿Es Extreme Car Driving Simulator mod apk hack seguro de usar? A: Sí, Extreme Car Driving Simulator mod apk hack es seguro de usar siempre y cuando se descarga de una fuente de confianza y utilizar un emulador de Android confiable. P: ¿Puedo jugar Extreme Car Driving Simulator en línea con otros jugadores? R: No, Extreme Car Driving Simulator es un juego sin conexión que no admite el modo multijugador en línea. P: ¿Cómo puedo actualizar Extreme Car Driving Simulator mod apk hack? A: Para actualizar Extreme Car Driving Simulator mod apk hack, es necesario descargar e instalar la última versión del archivo apk mod de la misma fuente que antes. P: ¿Cuáles son algunos otros juegos similares a Extreme Car Driving Simulator? R: Algunos otros juegos similares a Extreme Car Driving Simulator son Real Racing 3, Asphalt 9: Legends, CSR Racing 2, Need for Speed: No Limits, etc. P: ¿Cómo puedo contactar con el desarrollador de Extreme Car Driving Simulator? R: Puede ponerse en contacto con el desarrollador de Extreme Car Driving Simulator enviando un correo electrónico a [email protected] o visitando Ya he escrito el artículo según sus instrucciones. No hay nada más que escribir. Espero que estén satisfechos con mi trabajo. Si tienen algún comentario o sugerencia, por favor háganmelo saber. Gracias por elegirme como tu escritor de contenido. </p> 64aa2da5cf<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/__init__.py
DELETED
@@ -1,182 +0,0 @@
#   __
#  /__)  _  _     _   _ _/   _
# / (   (- (/ (/ (- _)  /  _)
#          /

"""
Requests HTTP Library
~~~~~~~~~~~~~~~~~~~~~

Requests is an HTTP library, written in Python, for human beings.
Basic GET usage:

   >>> import requests
   >>> r = requests.get('https://www.python.org')
   >>> r.status_code
   200
   >>> b'Python is a programming language' in r.content
   True

... or POST:

   >>> payload = dict(key1='value1', key2='value2')
   >>> r = requests.post('https://httpbin.org/post', data=payload)
   >>> print(r.text)
   {
     ...
     "form": {
       "key1": "value1",
       "key2": "value2"
     },
     ...
   }

The other HTTP methods are supported - see `requests.api`. Full documentation
is at <https://requests.readthedocs.io>.

:copyright: (c) 2017 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""

import warnings

from pip._vendor import urllib3

from .exceptions import RequestsDependencyWarning

charset_normalizer_version = None

try:
    from pip._vendor.chardet import __version__ as chardet_version
except ImportError:
    chardet_version = None


def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version):
    urllib3_version = urllib3_version.split(".")
    assert urllib3_version != ["dev"]  # Verify urllib3 isn't installed from git.

    # Sometimes, urllib3 only reports its version as 16.1.
    if len(urllib3_version) == 2:
        urllib3_version.append("0")

    # Check urllib3 for compatibility.
    major, minor, patch = urllib3_version  # noqa: F811
    major, minor, patch = int(major), int(minor), int(patch)
    # urllib3 >= 1.21.1, <= 1.26
    assert major == 1
    assert minor >= 21
    assert minor <= 26

    # Check charset_normalizer for compatibility.
    if chardet_version:
        major, minor, patch = chardet_version.split(".")[:3]
        major, minor, patch = int(major), int(minor), int(patch)
        # chardet_version >= 3.0.2, < 6.0.0
        assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0)
    elif charset_normalizer_version:
        major, minor, patch = charset_normalizer_version.split(".")[:3]
        major, minor, patch = int(major), int(minor), int(patch)
        # charset_normalizer >= 2.0.0 < 4.0.0
        assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0)
    else:
        raise Exception("You need either charset_normalizer or chardet installed")


def _check_cryptography(cryptography_version):
    # cryptography < 1.3.4
    try:
        cryptography_version = list(map(int, cryptography_version.split(".")))
    except ValueError:
        return

    if cryptography_version < [1, 3, 4]:
        warning = "Old version of cryptography ({}) may cause slowdown.".format(
            cryptography_version
        )
        warnings.warn(warning, RequestsDependencyWarning)


# Check imported dependencies for compatibility.
try:
    check_compatibility(
        urllib3.__version__, chardet_version, charset_normalizer_version
    )
except (AssertionError, ValueError):
    warnings.warn(
        "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported "
        "version!".format(
            urllib3.__version__, chardet_version, charset_normalizer_version
        ),
        RequestsDependencyWarning,
    )

# Attempt to enable urllib3's fallback for SNI support
# if the standard library doesn't support SNI or the
# 'ssl' library isn't available.
try:
    # Note: This logic prevents upgrading cryptography on Windows, if imported
    # as part of pip.
    from pip._internal.utils.compat import WINDOWS
    if not WINDOWS:
        raise ImportError("pip internals: don't import cryptography on Windows")
    try:
        import ssl
    except ImportError:
        ssl = None

    if not getattr(ssl, "HAS_SNI", False):
        from pip._vendor.urllib3.contrib import pyopenssl

        pyopenssl.inject_into_urllib3()

    # Check cryptography version
    from cryptography import __version__ as cryptography_version

    _check_cryptography(cryptography_version)
except ImportError:
    pass

# urllib3's DependencyWarnings should be silenced.
from pip._vendor.urllib3.exceptions import DependencyWarning

warnings.simplefilter("ignore", DependencyWarning)

# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler

from . import packages, utils
from .__version__ import (
    __author__,
    __author_email__,
    __build__,
    __cake__,
    __copyright__,
    __description__,
    __license__,
    __title__,
    __url__,
    __version__,
)
from .api import delete, get, head, options, patch, post, put, request
from .exceptions import (
    ConnectionError,
    ConnectTimeout,
    FileModeWarning,
    HTTPError,
    JSONDecodeError,
    ReadTimeout,
    RequestException,
    Timeout,
    TooManyRedirects,
    URLRequired,
)
from .models import PreparedRequest, Request, Response
from .sessions import Session, session
from .status_codes import codes

logging.getLogger(__name__).addHandler(NullHandler())

# FileModeWarnings go off per the default.
warnings.simplefilter("default", FileModeWarning, append=True)
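For reference, a minimal sketch (not part of the diff) of what the `check_compatibility` gate above accepts and rejects; the version strings are illustrative assumptions, not pinned requirements:

# Hedged usage sketch for the vendored check above; version strings are made up.
from pip._vendor.requests import check_compatibility

check_compatibility("1.26.15", None, "3.1.0")    # urllib3 1.26.x + charset_normalizer 3.x: passes
try:
    check_compatibility("2.0.4", None, "3.1.0")  # urllib3 2.x trips the `major == 1` assertion
except AssertionError:
    print("urllib3 2.x is rejected by this vendored check")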
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/memory.py
DELETED
@@ -1,86 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

import logging
from contextlib import contextmanager
from functools import wraps
import torch

__all__ = ["retry_if_cuda_oom"]


@contextmanager
def _ignore_torch_cuda_oom():
    """
    A context which ignores CUDA OOM exception from pytorch.
    """
    try:
        yield
    except RuntimeError as e:
        # NOTE: the string may change?
        if "CUDA out of memory. " in str(e):
            pass
        else:
            raise


def retry_if_cuda_oom(func):
    """
    Makes a function retry itself after encountering
    pytorch's CUDA OOM error.
    It will first retry after calling `torch.cuda.empty_cache()`.

    If that still fails, it will then retry by trying to convert inputs to CPUs.
    In this case, it expects the function to dispatch to CPU implementation.
    The return values may become CPU tensors as well and it's user's
    responsibility to convert it back to CUDA tensor if needed.

    Args:
        func: a stateless callable that takes tensor-like objects as arguments

    Returns:
        a callable which retries `func` if OOM is encountered.

    Examples:

    .. code-block:: python

        output = retry_if_cuda_oom(some_torch_function)(input1, input2)
        # output may be on CPU even if inputs are on GPU

    Note:
        1. When converting inputs to CPU, it will only look at each argument and check
           if it has `.device` and `.to` for conversion. Nested structures of tensors
           are not supported.

        2. Since the function might be called more than once, it has to be
           stateless.
    """

    def maybe_to_cpu(x):
        try:
            like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
        except AttributeError:
            like_gpu_tensor = False
        if like_gpu_tensor:
            return x.to(device="cpu")
        else:
            return x

    @wraps(func)
    def wrapped(*args, **kwargs):
        with _ignore_torch_cuda_oom():
            return func(*args, **kwargs)

        # Clear cache and retry
        torch.cuda.empty_cache()
        with _ignore_torch_cuda_oom():
            return func(*args, **kwargs)

        # Try on CPU. This slows down the code significantly, therefore print a notice.
        logger = logging.getLogger(__name__)
        logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
        new_args = (maybe_to_cpu(x) for x in args)
        new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
        return func(*new_args, **new_kwargs)

    return wrapped
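For reference, a minimal usage sketch (not part of the diff) for the `retry_if_cuda_oom` wrapper above; `pairwise_score` is a made-up stand-in for any stateless tensor-in/tensor-out function:

# Hedged sketch: on CUDA OOM the wrapper empties the cache, retries, then falls
# back to CPU inputs, so the output may come back on CPU.
import torch
from detectron2.utils.memory import retry_if_cuda_oom

def pairwise_score(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Allocates an (N, M, D) intermediate, so it can OOM for large N and M.
    return (a[:, None, :] * b[None, :, :]).sum(-1)

a, b = torch.rand(8, 4), torch.rand(16, 4)
out = retry_if_cuda_oom(pairwise_score)(a, b)
print(out.shape, out.device)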
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_checkpoint.py
DELETED
@@ -1,48 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from collections import OrderedDict
import torch
from torch import nn

from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts
from detectron2.utils.logger import setup_logger


class TestCheckpointer(unittest.TestCase):
    def setUp(self):
        setup_logger()

    def create_complex_model(self):
        m = nn.Module()
        m.block1 = nn.Module()
        m.block1.layer1 = nn.Linear(2, 3)
        m.layer2 = nn.Linear(3, 2)
        m.res = nn.Module()
        m.res.layer2 = nn.Linear(3, 2)

        state_dict = OrderedDict()
        state_dict["layer1.weight"] = torch.rand(3, 2)
        state_dict["layer1.bias"] = torch.rand(3)
        state_dict["layer2.weight"] = torch.rand(2, 3)
        state_dict["layer2.bias"] = torch.rand(2)
        state_dict["res.layer2.weight"] = torch.rand(2, 3)
        state_dict["res.layer2.bias"] = torch.rand(2)
        return m, state_dict

    def test_complex_model_loaded(self):
        for add_data_parallel in [False, True]:
            model, state_dict = self.create_complex_model()
            if add_data_parallel:
                model = nn.DataParallel(model)
            model_sd = model.state_dict()

            align_and_update_state_dicts(model_sd, state_dict)
            for loaded, stored in zip(model_sd.values(), state_dict.values()):
                # different tensor references
                self.assertFalse(id(loaded) == id(stored))
                # same content
                self.assertTrue(loaded.equal(stored))


if __name__ == "__main__":
    unittest.main()
spaces/CVPR/LIVE/parallel.cpp
DELETED
@@ -1,273 +0,0 @@
#include "parallel.h"
#include <list>
#include <thread>
#include <condition_variable>
#include <vector>
#include <cassert>

// From https://github.com/mmp/pbrt-v3/blob/master/src/core/parallel.cpp

static std::vector<std::thread> threads;
static bool shutdownThreads = false;
struct ParallelForLoop;
static ParallelForLoop *workList = nullptr;
static std::mutex workListMutex;

struct ParallelForLoop {
    ParallelForLoop(std::function<void(int64_t)> func1D, int64_t maxIndex, int chunkSize)
        : func1D(std::move(func1D)), maxIndex(maxIndex), chunkSize(chunkSize) {
    }
    ParallelForLoop(const std::function<void(Vector2i)> &f, const Vector2i count)
        : func2D(f), maxIndex(count[0] * count[1]), chunkSize(1) {
        nX = count[0];
    }

    std::function<void(int64_t)> func1D;
    std::function<void(Vector2i)> func2D;
    const int64_t maxIndex;
    const int chunkSize;
    int64_t nextIndex = 0;
    int activeWorkers = 0;
    ParallelForLoop *next = nullptr;
    int nX = -1;

    bool Finished() const {
        return nextIndex >= maxIndex && activeWorkers == 0;
    }
};

void Barrier::Wait() {
    std::unique_lock<std::mutex> lock(mutex);
    assert(count > 0);
    if (--count == 0) {
        // This is the last thread to reach the barrier; wake up all of the
        // other ones before exiting.
        cv.notify_all();
    } else {
        // Otherwise there are still threads that haven't reached it. Give
        // up the lock and wait to be notified.
        cv.wait(lock, [this] { return count == 0; });
    }
}

static std::condition_variable workListCondition;

static void worker_thread_func(const int tIndex, std::shared_ptr<Barrier> barrier) {
    ThreadIndex = tIndex;

    // The main thread sets up a barrier so that it can be sure that all
    // workers have called ProfilerWorkerThreadInit() before it continues
    // (and actually starts the profiling system).
    barrier->Wait();

    // Release our reference to the Barrier so that it's freed once all of
    // the threads have cleared it.
    barrier.reset();

    std::unique_lock<std::mutex> lock(workListMutex);
    while (!shutdownThreads) {
        if (!workList) {
            // Sleep until there are more tasks to run
            workListCondition.wait(lock);
        } else {
            // Get work from _workList_ and run loop iterations
            ParallelForLoop &loop = *workList;

            // Run a chunk of loop iterations for _loop_

            // Find the set of loop iterations to run next
            int64_t indexStart = loop.nextIndex;
            int64_t indexEnd = std::min(indexStart + loop.chunkSize, loop.maxIndex);

            // Update _loop_ to reflect iterations this thread will run
            loop.nextIndex = indexEnd;
            if (loop.nextIndex == loop.maxIndex)
                workList = loop.next;
            loop.activeWorkers++;

            // Run loop indices in _[indexStart, indexEnd)_
            lock.unlock();
            for (int64_t index = indexStart; index < indexEnd; ++index) {
                if (loop.func1D) {
                    loop.func1D(index);
                }
                // Handle other types of loops
                else {
                    assert(loop.func2D != nullptr);
                    loop.func2D(Vector2i{int(index % loop.nX),
                                         int(index / loop.nX)});
                }
            }
            lock.lock();

            // Update _loop_ to reflect completion of iterations
            loop.activeWorkers--;
            if (loop.Finished()) {
                workListCondition.notify_all();
            }
        }
    }
}

void parallel_for_host(const std::function<void(int64_t)> &func,
                       int64_t count,
                       int chunkSize) {
    // Run iterations immediately if not using threads or if _count_ is small
    if (threads.empty() || count < chunkSize) {
        for (int64_t i = 0; i < count; ++i) {
            func(i);
        }
        return;
    }

    // Create and enqueue _ParallelForLoop_ for this loop
    ParallelForLoop loop(func, count, chunkSize);
    workListMutex.lock();
    loop.next = workList;
    workList = &loop;
    workListMutex.unlock();

    // Notify worker threads of work to be done
    std::unique_lock<std::mutex> lock(workListMutex);
    workListCondition.notify_all();

    // Help out with parallel loop iterations in the current thread
    while (!loop.Finished()) {
        // Run a chunk of loop iterations for _loop_

        // Find the set of loop iterations to run next
        int64_t indexStart = loop.nextIndex;
        int64_t indexEnd = std::min(indexStart + loop.chunkSize, loop.maxIndex);

        // Update _loop_ to reflect iterations this thread will run
        loop.nextIndex = indexEnd;
        if (loop.nextIndex == loop.maxIndex) {
            workList = loop.next;
        }
        loop.activeWorkers++;

        // Run loop indices in _[indexStart, indexEnd)_
        lock.unlock();
        for (int64_t index = indexStart; index < indexEnd; ++index) {
            if (loop.func1D) {
                loop.func1D(index);
            }
            // Handle other types of loops
            else {
                assert(loop.func2D != nullptr);
                loop.func2D(Vector2i{int(index % loop.nX),
                                     int(index / loop.nX)});
            }
        }
        lock.lock();

        // Update _loop_ to reflect completion of iterations
        loop.activeWorkers--;
    }
}

thread_local int ThreadIndex;

void parallel_for_host(
        std::function<void(Vector2i)> func, const Vector2i count) {
    // Launch worker threads if needed
    if (threads.empty() || count.x * count.y <= 1) {
        for (int y = 0; y < count.y; ++y) {
            for (int x = 0; x < count.x; ++x) {
                func(Vector2i{x, y});
            }
        }
        return;
    }

    ParallelForLoop loop(std::move(func), count);
    {
        std::lock_guard<std::mutex> lock(workListMutex);
        loop.next = workList;
        workList = &loop;
    }

    std::unique_lock<std::mutex> lock(workListMutex);
    workListCondition.notify_all();

    // Help out with parallel loop iterations in the current thread
    while (!loop.Finished()) {
        // Run a chunk of loop iterations for _loop_

        // Find the set of loop iterations to run next
        int64_t indexStart = loop.nextIndex;
        int64_t indexEnd = std::min(indexStart + loop.chunkSize, loop.maxIndex);

        // Update _loop_ to reflect iterations this thread will run
        loop.nextIndex = indexEnd;
        if (loop.nextIndex == loop.maxIndex) {
            workList = loop.next;
        }
        loop.activeWorkers++;

        // Run loop indices in _[indexStart, indexEnd)_
        lock.unlock();
        for (int64_t index = indexStart; index < indexEnd; ++index) {
            if (loop.func1D) {
                loop.func1D(index);
            }
            // Handle other types of loops
            else {
                assert(loop.func2D != nullptr);
                loop.func2D(Vector2i{int(index % loop.nX),
                                     int(index / loop.nX)});
            }
        }
        lock.lock();

        // Update _loop_ to reflect completion of iterations
        loop.activeWorkers--;
    }
}

int num_system_cores() {
    // return 1;
    int ret = std::thread::hardware_concurrency();
    if (ret == 0) {
        return 16;
    }
    return ret;
}

void parallel_init() {
    assert(threads.size() == 0);
    int nThreads = num_system_cores();
    ThreadIndex = 0;

    // Create a barrier so that we can be sure all worker threads get past
    // their call to ProfilerWorkerThreadInit() before we return from this
    // function. In turn, we can be sure that the profiling system isn't
    // started until after all worker threads have done that.
    std::shared_ptr<Barrier> barrier = std::make_shared<Barrier>(nThreads);

    // Launch one fewer worker thread than the total number we want doing
    // work, since the main thread helps out, too.
    for (int i = 0; i < nThreads - 1; ++i) {
        threads.push_back(std::thread(worker_thread_func, i + 1, barrier));
    }

    barrier->Wait();
}

void parallel_cleanup() {
    if (threads.empty()) {
        return;
    }

    {
        std::lock_guard<std::mutex> lock(workListMutex);
        shutdownThreads = true;
        workListCondition.notify_all();
    }

    for (std::thread &thread : threads) {
        thread.join();
    }
    threads.erase(threads.begin(), threads.end());
    shutdownThreads = false;
}
spaces/CVPR/regionclip-demo/detectron2/solver/build.py
DELETED
@@ -1,252 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import logging
from enum import Enum
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union
import torch
from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler

from detectron2.config import CfgNode

from .lr_scheduler import LRMultiplier, WarmupParamScheduler

_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]]
_GradientClipper = Callable[[_GradientClipperInput], None]


class GradientClipType(Enum):
    VALUE = "value"
    NORM = "norm"


def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper:
    """
    Creates gradient clipping closure to clip by value or by norm,
    according to the provided config.
    """
    cfg = copy.deepcopy(cfg)

    def clip_grad_norm(p: _GradientClipperInput):
        torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE)

    def clip_grad_value(p: _GradientClipperInput):
        torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE)

    _GRADIENT_CLIP_TYPE_TO_CLIPPER = {
        GradientClipType.VALUE: clip_grad_value,
        GradientClipType.NORM: clip_grad_norm,
    }
    return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)]


def _generate_optimizer_class_with_gradient_clipping(
    optimizer: Type[torch.optim.Optimizer],
    *,
    per_param_clipper: Optional[_GradientClipper] = None,
    global_clipper: Optional[_GradientClipper] = None,
) -> Type[torch.optim.Optimizer]:
    """
    Dynamically creates a new type that inherits the type of a given instance
    and overrides the `step` method to add gradient clipping
    """
    assert (
        per_param_clipper is None or global_clipper is None
    ), "Not allowed to use both per-parameter clipping and global clipping"

    def optimizer_wgc_step(self, closure=None):
        if per_param_clipper is not None:
            for group in self.param_groups:
                for p in group["params"]:
                    per_param_clipper(p)
        else:
            # global clipper for future use with detr
            # (https://github.com/facebookresearch/detr/pull/287)
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            global_clipper(all_params)
        super(type(self), self).step(closure)

    OptimizerWithGradientClip = type(
        optimizer.__name__ + "WithGradientClip",
        (optimizer,),
        {"step": optimizer_wgc_step},
    )
    return OptimizerWithGradientClip


def maybe_add_gradient_clipping(
    cfg: CfgNode, optimizer: Type[torch.optim.Optimizer]
) -> Type[torch.optim.Optimizer]:
    """
    If gradient clipping is enabled through config options, wraps the existing
    optimizer type to become a new dynamically created class OptimizerWithGradientClip
    that inherits the given optimizer and overrides the `step` method to
    include gradient clipping.

    Args:
        cfg: CfgNode, configuration options
        optimizer: type. A subclass of torch.optim.Optimizer

    Return:
        type: either the input `optimizer` (if gradient clipping is disabled), or
            a subclass of it with gradient clipping included in the `step` method.
    """
    if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
        return optimizer
    if isinstance(optimizer, torch.optim.Optimizer):
        optimizer_type = type(optimizer)
    else:
        assert issubclass(optimizer, torch.optim.Optimizer), optimizer
        optimizer_type = optimizer

    grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS)
    OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping(
        optimizer_type, per_param_clipper=grad_clipper
    )
    if isinstance(optimizer, torch.optim.Optimizer):
        optimizer.__class__ = OptimizerWithGradientClip  # a bit hacky, not recommended
        return optimizer
    else:
        return OptimizerWithGradientClip


def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
    """
    Build an optimizer from config.
    """
    params = get_default_optimizer_params(
        model,
        base_lr=cfg.SOLVER.BASE_LR,
        weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
        bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
        weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
    )
    return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
        params,
        lr=cfg.SOLVER.BASE_LR,
        momentum=cfg.SOLVER.MOMENTUM,
        nesterov=cfg.SOLVER.NESTEROV,
        weight_decay=cfg.SOLVER.WEIGHT_DECAY,
    )


def get_default_optimizer_params(
    model: torch.nn.Module,
    base_lr: Optional[float] = None,
    weight_decay: Optional[float] = None,
    weight_decay_norm: Optional[float] = None,
    bias_lr_factor: Optional[float] = 1.0,
    weight_decay_bias: Optional[float] = None,
    overrides: Optional[Dict[str, Dict[str, float]]] = None,
):
    """
    Get default param list for optimizer, with support for a few types of
    overrides. If no overrides needed, this is equivalent to `model.parameters()`.

    Args:
        base_lr: lr for every group by default. Can be omitted to use the one in optimizer.
        weight_decay: weight decay for every group by default. Can be omitted to use the one
            in optimizer.
        weight_decay_norm: override weight decay for params in normalization layers
        bias_lr_factor: multiplier of lr for bias parameters.
        weight_decay_bias: override weight decay for bias parameters
        overrides: if not `None`, provides values for optimizer hyperparameters
            (LR, weight decay) for module parameters with a given name; e.g.
            ``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and
            weight decay values for all module parameters named `embedding`.

    For common detection models, ``weight_decay_norm`` is the only option
    needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings
    from Detectron1 that are not found useful.

    Example:
    ::
        torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0),
                       lr=0.01, weight_decay=1e-4, momentum=0.9)
    """
    if overrides is None:
        overrides = {}
    defaults = {}
    if base_lr is not None:
        defaults["lr"] = base_lr
    if weight_decay is not None:
        defaults["weight_decay"] = weight_decay
    bias_overrides = {}
    if bias_lr_factor is not None and bias_lr_factor != 1.0:
        # NOTE: unlike Detectron v1, we now by default make bias hyperparameters
        # exactly the same as regular weights.
        if base_lr is None:
            raise ValueError("bias_lr_factor requires base_lr")
        bias_overrides["lr"] = base_lr * bias_lr_factor
    if weight_decay_bias is not None:
        bias_overrides["weight_decay"] = weight_decay_bias
    if len(bias_overrides):
        if "bias" in overrides:
            raise ValueError("Conflicting overrides for 'bias'")
        overrides["bias"] = bias_overrides

    norm_module_types = (
        torch.nn.BatchNorm1d,
        torch.nn.BatchNorm2d,
        torch.nn.BatchNorm3d,
        torch.nn.SyncBatchNorm,
        # NaiveSyncBatchNorm inherits from BatchNorm2d
        torch.nn.GroupNorm,
        torch.nn.InstanceNorm1d,
        torch.nn.InstanceNorm2d,
        torch.nn.InstanceNorm3d,
        torch.nn.LayerNorm,
        torch.nn.LocalResponseNorm,
    )
    params: List[Dict[str, Any]] = []
    memo: Set[torch.nn.parameter.Parameter] = set()
    for module in model.modules():
        for module_param_name, value in module.named_parameters(recurse=False):
            if not value.requires_grad:
                continue
            # Avoid duplicating parameters
            if value in memo:
                continue
            memo.add(value)

            hyperparams = copy.copy(defaults)
            if isinstance(module, norm_module_types) and weight_decay_norm is not None:
                hyperparams["weight_decay"] = weight_decay_norm
            hyperparams.update(overrides.get(module_param_name, {}))
            params.append({"params": [value], **hyperparams})
    return params


def build_lr_scheduler(
    cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
    """
    Build a LR scheduler from config.
    """
    name = cfg.SOLVER.LR_SCHEDULER_NAME

    if name == "WarmupMultiStepLR":
        steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER]
        if len(steps) != len(cfg.SOLVER.STEPS):
            logger = logging.getLogger(__name__)
            logger.warning(
                "SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. "
                "These values will be ignored."
            )
        sched = MultiStepParamScheduler(
            values=[cfg.SOLVER.GAMMA ** k for k in range(len(steps) + 1)],
            milestones=steps,
            num_updates=cfg.SOLVER.MAX_ITER,
        )
    elif name == "WarmupCosineLR":
        sched = CosineParamScheduler(1, 0)
    else:
        raise ValueError("Unknown LR scheduler: {}".format(name))

    sched = WarmupParamScheduler(
        sched,
        cfg.SOLVER.WARMUP_FACTOR,
        min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0),
        cfg.SOLVER.WARMUP_METHOD,
    )
    return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER)
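For reference, a hedged sketch (not part of the diff) of how the builders above fit together, assuming a stock detectron2 install; the two-layer model is a toy stand-in and the config values are illustrative:

# Hedged sketch: build an SGD optimizer whose step() clips gradients by norm.
import torch
from detectron2.config import get_cfg
from detectron2.solver import build_lr_scheduler, build_optimizer

cfg = get_cfg()
cfg.SOLVER.BASE_LR = 0.01
cfg.SOLVER.CLIP_GRADIENTS.ENABLED = True
cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "norm"   # one of GradientClipType's values
cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0

model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.BatchNorm1d(8))
optimizer = build_optimizer(cfg, model)        # dynamically created SGD subclass
scheduler = build_lr_scheduler(cfg, optimizer)
print(type(optimizer).__name__)                # "SGDWithGradientClip" when clipping is on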
spaces/CarlDennis/HYTTS/text/cleaners.py
DELETED
@@ -1,35 +0,0 @@
import re
from text.japanese import japanese_to_romaji_with_accent
from text.mandarin import chinese_to_romaji
from text.english import english_to_ipa2
from text.german import german_to_ipa
from text.croatia_to_ipa import croatian_to_ipa

def cjehd_cleaners(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    croatian_texts = re.findall(r'\[CR\].*?\[CR\]', text)
    english_texts = re.findall(r'\[EN\].*?\[EN\]', text)
    german_texts = re.findall(r'\[DE\].*?\[DE\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_romaji(chinese_text[4:-4])
        text = text.replace(chinese_text, cleaned_text+' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_to_romaji_with_accent(
            japanese_text[4:-4]).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')
        text = text.replace(japanese_text, cleaned_text+' ', 1)
    for english_text in english_texts:
        cleaned_text = english_to_ipa2(english_text[4:-4])
        text = text.replace(english_text, cleaned_text+' ', 1)
    for croatian_text in croatian_texts:
        cleaned_text = croatian_to_ipa(croatian_text[4:-4])
        cleaned_text = cleaned_text.replace('ḱ','k')
        text = text.replace(croatian_text, cleaned_text + ' ', 1)
    for german_text in german_texts:
        german_text = german_text.replace('...','').replace('--','').replace('-','')
        cleaned_text = german_to_ipa(german_text[4:-4])
        text = text.replace(german_text, cleaned_text + ' ', 1)
    text = text[:-1]
    if re.match(r'[^\.,!\?\-…~]', text[-1]):
        text += '.'
    return text
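For reference, an illustrative call (not part of the diff) to the cleaner above; it assumes the space's repo-local `text.*` helper modules are importable, and each language span is wrapped in its tag pair:

# Hedged sketch: requires the space's own text.* modules on the path.
from text.cleaners import cjehd_cleaners

mixed = "[ZH]你好[ZH][EN]Hello there[EN][DE]Guten Tag[DE]"
print(cjehd_cleaners(mixed))  # each span converted to romaji/IPA, joined with spaces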
spaces/Chitranshu/Dashboard-Zomato/README.md
DELETED
@@ -1,11 +0,0 @@
---
title: Zomato-Dashboard
emoji: 📊
colorFrom: red
colorTo: red
sdk: docker
pinned: false

---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Cletrason/Cletrason-toad-in-the-mario-movie/trainer_pt_utils.py
DELETED
@@ -1,1106 +0,0 @@
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Torch utilities for the Trainer class.
"""

import datetime
import json
import math
import os
import sys
import warnings
from collections.abc import Mapping
from contextlib import contextmanager
from dataclasses import dataclass
from logging import StreamHandler
from typing import Any, Dict, Iterator, List, Optional, Union

import numpy as np
import torch
import torch.distributed as dist
from torch import nn
from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler
from torch.utils.data.distributed import DistributedSampler

from .tokenization_utils_base import BatchEncoding
from .utils import is_sagemaker_mp_enabled, is_torch_tpu_available, is_training_run_on_sagemaker, logging


if is_training_run_on_sagemaker():
    logging.add_handler(StreamHandler(sys.stdout))

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm

# this is used to suppress an undesired warning emitted by pytorch versions 1.4.2-1.7.0
try:
    from torch.optim.lr_scheduler import SAVE_STATE_WARNING
except ImportError:
    SAVE_STATE_WARNING = ""

logger = logging.get_logger(__name__)


def atleast_1d(tensor_or_array: Union[torch.Tensor, np.ndarray]):
    if isinstance(tensor_or_array, torch.Tensor):
        if hasattr(torch, "atleast_1d"):
            tensor_or_array = torch.atleast_1d(tensor_or_array)
        elif tensor_or_array.ndim < 1:
            tensor_or_array = tensor_or_array[None]
    else:
        tensor_or_array = np.atleast_1d(tensor_or_array)
    return tensor_or_array


def torch_pad_and_concatenate(tensor1, tensor2, padding_index=-100):
    """Concatenates `tensor1` and `tensor2` on first axis, applying padding on the second if necessary."""
    tensor1 = atleast_1d(tensor1)
    tensor2 = atleast_1d(tensor2)

    if len(tensor1.shape) == 1 or tensor1.shape[1] == tensor2.shape[1]:
        return torch.cat((tensor1, tensor2), dim=0)

    # Let's figure out the new shape
    new_shape = (tensor1.shape[0] + tensor2.shape[0], max(tensor1.shape[1], tensor2.shape[1])) + tensor1.shape[2:]

    # Now let's fill the result tensor
    result = tensor1.new_full(new_shape, padding_index)
    result[: tensor1.shape[0], : tensor1.shape[1]] = tensor1
    result[tensor1.shape[0] :, : tensor2.shape[1]] = tensor2
    return result


def numpy_pad_and_concatenate(array1, array2, padding_index=-100):
    """Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary."""
    array1 = atleast_1d(array1)
    array2 = atleast_1d(array2)

    if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]:
        return np.concatenate((array1, array2), axis=0)

    # Let's figure out the new shape
    new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:]

    # Now let's fill the result tensor
    result = np.full_like(array1, padding_index, shape=new_shape)
    result[: array1.shape[0], : array1.shape[1]] = array1
    result[array1.shape[0] :, : array2.shape[1]] = array2
    return result


def nested_concat(tensors, new_tensors, padding_index=-100):
    """
    Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or
    nested list/tuples/dict of tensors.
    """
    assert type(tensors) == type(
        new_tensors
    ), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."
    if isinstance(tensors, (list, tuple)):
        return type(tensors)(nested_concat(t, n, padding_index=padding_index) for t, n in zip(tensors, new_tensors))
    elif isinstance(tensors, torch.Tensor):
        return torch_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index)
    elif isinstance(tensors, Mapping):
        return type(tensors)(
            {k: nested_concat(t, new_tensors[k], padding_index=padding_index) for k, t in tensors.items()}
        )
    elif isinstance(tensors, np.ndarray):
        return numpy_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index)
    else:
        raise TypeError(f"Unsupported type for concatenation: got {type(tensors)}")


def find_batch_size(tensors):
    """
    Find the first dimension of a tensor in a nested list/tuple/dict of tensors.
    """
    if isinstance(tensors, (list, tuple)):
        for t in tensors:
            result = find_batch_size(t)
            if result is not None:
                return result
    elif isinstance(tensors, Mapping):
        for key, value in tensors.items():
            result = find_batch_size(value)
            if result is not None:
                return result
    elif isinstance(tensors, torch.Tensor):
        return tensors.shape[0] if len(tensors.shape) >= 1 else None
    elif isinstance(tensors, np.ndarray):
        return tensors.shape[0] if len(tensors.shape) >= 1 else None


def nested_numpify(tensors):
    "Numpify `tensors` (even if it's a nested list/tuple/dict of tensors)."
    if isinstance(tensors, (list, tuple)):
        return type(tensors)(nested_numpify(t) for t in tensors)
    if isinstance(tensors, Mapping):
        return type(tensors)({k: nested_numpify(t) for k, t in tensors.items()})

    t = tensors.cpu()
    if t.dtype == torch.bfloat16:
        # As of Numpy 1.21.4, NumPy does not support bfloat16 (see
        # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ).
        # Until Numpy adds bfloat16, we must convert float32.
        t = t.to(torch.float32)
    return t.numpy()


def nested_detach(tensors):
    "Detach `tensors` (even if it's a nested list/tuple/dict of tensors)."
    if isinstance(tensors, (list, tuple)):
        return type(tensors)(nested_detach(t) for t in tensors)
    elif isinstance(tensors, Mapping):
        return type(tensors)({k: nested_detach(t) for k, t in tensors.items()})
    return tensors.detach()


def nested_xla_mesh_reduce(tensors, name):
    if is_torch_tpu_available():
        import torch_xla.core.xla_model as xm

        if isinstance(tensors, (list, tuple)):
            return type(tensors)(nested_xla_mesh_reduce(t, f"{name}_{i}") for i, t in enumerate(tensors))
        if isinstance(tensors, Mapping):
            return type(tensors)(
                {k: nested_xla_mesh_reduce(t, f"{name}_{i}") for i, (k, t) in enumerate(tensors.items())}
            )

        tensors = atleast_1d(tensors)
        return xm.mesh_reduce(name, tensors, torch.cat)
    else:
        raise ImportError("Torch xla must be installed to use `nested_xla_mesh_reduce`")


def distributed_concat(tensor: Any, num_total_examples: Optional[int] = None) -> Any:
    try:
        if isinstance(tensor, (tuple, list)):
            return type(tensor)(distributed_concat(t, num_total_examples) for t in tensor)
        if isinstance(tensor, Mapping):
            return type(tensor)({k: distributed_concat(t, num_total_examples) for k, t in tensor.items()})
        tensor = atleast_1d(tensor).contiguous()
        output_tensors = [tensor.clone() for _ in range(dist.get_world_size())]
        dist.all_gather(output_tensors, tensor)
        concat = torch.cat(output_tensors, dim=0)

        # truncate the dummy elements added by SequentialDistributedSampler
        if num_total_examples is not None:
            concat = concat[:num_total_examples]
        return concat
    except AssertionError:
        raise AssertionError("Not currently using distributed training")


def distributed_broadcast_scalars(
    scalars: List[Union[int, float]],
    num_total_examples: Optional[int] = None,
    device: Optional[torch.device] = torch.device("cuda"),
) -> torch.Tensor:
    try:
        tensorized_scalar = torch.tensor(scalars).to(device)
        output_tensors = [tensorized_scalar.clone() for _ in range(dist.get_world_size())]
        dist.all_gather(output_tensors, tensorized_scalar)
        concat = torch.cat(output_tensors, dim=0)

        # truncate the dummy elements added by SequentialDistributedSampler
        if num_total_examples is not None:
            concat = concat[:num_total_examples]
        return concat
    except AssertionError:
        raise AssertionError("Not currently using distributed training")


def reissue_pt_warnings(caught_warnings):
    # Reissue warnings that are not the SAVE_STATE_WARNING
    if len(caught_warnings) > 1:
        for w in caught_warnings:
            if w.category != UserWarning or w.message != SAVE_STATE_WARNING:
                warnings.warn(w.message, w.category)


@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.

    Args:
        local_rank (`int`): The rank of the local process.
    """
    if local_rank not in [-1, 0]:
        dist.barrier()
    yield
    if local_rank == 0:
        dist.barrier()


class DistributedSamplerWithLoop(DistributedSampler):
    """
    Like a torch.utils.data.distributed.DistributedSampler` but loops at the end back to the beginning of the shuffled
    samples to make each process have a round multiple of batch_size samples.

    Args:
        dataset (`torch.utils.data.Dataset`):
            Dataset used for sampling.
        batch_size (`int`):
            The batch size used with this sampler
        kwargs:
            All other keyword arguments passed to `DistributedSampler`.
    """

    def __init__(self, dataset, batch_size, **kwargs):
        super().__init__(dataset, **kwargs)
        self.batch_size = batch_size

    def __iter__(self):
        indices = list(super().__iter__())
        remainder = 0 if len(indices) % self.batch_size == 0 else self.batch_size - len(indices) % self.batch_size
        # DistributedSampler already added samples from the beginning to make the number of samples a round multiple
        # of the world size, so we skip those.
        start_remainder = 1 if self.rank < len(self.dataset) % self.num_replicas else 0
        indices += indices[start_remainder : start_remainder + remainder]
        return iter(indices)


class SequentialDistributedSampler(Sampler):
    """
    Distributed Sampler that subsamples indices sequentially, making it easier to collate all results at the end.

    Even though we only use this sampler for eval and predict (no training), which means that the model params won't
    have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add
    extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather`
    or `reduce` resulting tensors at the end of the loop.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, batch_size=None):
        warnings.warn(
            "SequentialDistributedSampler is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        num_samples = len(self.dataset)
        # Add extra samples to make num_samples a multiple of batch_size if passed
        if batch_size is not None:
            self.num_samples = int(math.ceil(num_samples / (batch_size * num_replicas))) * batch_size
        else:
            self.num_samples = int(math.ceil(num_samples / num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.batch_size = batch_size

    def __iter__(self):
        indices = list(range(len(self.dataset)))

        # add extra samples to make it evenly divisible
        indices += indices[: (self.total_size - len(indices))]
        assert (
            len(indices) == self.total_size
        ), f"Indices length {len(indices)} and total size {self.total_size} mismatched"

        # subsample
        indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples]
        assert (
            len(indices) == self.num_samples
        ), f"Indices length {len(indices)} and sample number {self.num_samples} mismatched"

        return iter(indices)

    def __len__(self):
        return self.num_samples


def get_tpu_sampler(dataset: torch.utils.data.Dataset, batch_size: int):
    if xm.xrt_world_size() <= 1:
        return RandomSampler(dataset)
    return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())


def nested_new_like(arrays, num_samples, padding_index=-100):
    """Create the same nested structure as `arrays` with a first dimension always at `num_samples`."""
    if isinstance(arrays, (list, tuple)):
        return type(arrays)(nested_new_like(x, num_samples) for x in arrays)
    return np.full_like(arrays, padding_index, shape=(num_samples, *arrays.shape[1:]))


def expand_like(arrays, new_seq_length, padding_index=-100):
    """Expand the `arrays` so that the second dimension grows to `new_seq_length`. Uses `padding_index` for padding."""
    result = np.full_like(arrays, padding_index, shape=(arrays.shape[0], new_seq_length) + arrays.shape[2:])
    result[:, : arrays.shape[1]] = arrays
    return result


def nested_truncate(tensors, limit):
    "Truncate `tensors` at `limit` (even if it's a nested list/tuple/dict of tensors)."
    if isinstance(tensors, (list, tuple)):
        return type(tensors)(nested_truncate(t, limit) for t in tensors)
    if isinstance(tensors, Mapping):
        return type(tensors)({k: nested_truncate(t, limit) for k, t in tensors.items()})

    return tensors[:limit]


class DistributedTensorGatherer:
    """
    A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks.

    If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every
    step, our sampler will generate the following indices:

        `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]`

    to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and
    2 will be responsible of making predictions for the following samples:

    - P0: `[0, 1, 2, 3, 4, 5]`
    - P1: `[6, 7, 8, 9, 10, 11]`
    - P2: `[12, 13, 14, 15, 0, 1]`

    The first batch treated on each process will be

    - P0: `[0, 1]`
    - P1: `[6, 7]`
    - P2: `[12, 13]`

    So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to
    the following indices:

        `[0, 1, 6, 7, 12, 13]`

    If we directly concatenate our results without taking any precautions, the user will then get the predictions for
    the indices in this order at the end of the prediction loop:

        `[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]`

    For some reason, that's not going to roll their boat. This class is there to solve that problem.

    Args:
        world_size (`int`):
            The number of processes used in the distributed training.
        num_samples (`int`):
            The number of samples in our dataset.
        make_multiple_of (`int`, *optional*):
            If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument
            (by adding samples).
        padding_index (`int`, *optional*, defaults to -100):
            The padding index to use if the arrays don't all have the same sequence length.
    """

    def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100):
        warnings.warn(
            "DistributedTensorGatherer is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.world_size = world_size
        self.num_samples = num_samples
        total_size = world_size if make_multiple_of is None else world_size * make_multiple_of
        self.total_samples = int(np.ceil(num_samples / total_size)) * total_size
        self.process_length = self.total_samples // world_size
        self._storage = None
        self._offsets = None
        self.padding_index = padding_index

    def add_arrays(self, arrays):
        """
        Add `arrays` to the internal storage, Will initialize the storage to the full size at the first arrays passed
        so that if we're bound to get an OOM, it happens at the beginning.
        """
        if arrays is None:
            return
        if self._storage is None:
            self._storage = nested_new_like(arrays, self.total_samples, padding_index=self.padding_index)
            self._offsets = list(range(0, self.total_samples, self.process_length))

        slice_len, self._storage = self._nested_set_tensors(self._storage, arrays)
        for i in range(self.world_size):
            self._offsets[i] += slice_len

    def _nested_set_tensors(self, storage, arrays):
        if isinstance(arrays, (list, tuple)):
            result = [self._nested_set_tensors(x, y) for x, y in zip(storage, arrays)]
            return result[0][0], type(arrays)(r[1] for r in result)
        assert (
            arrays.shape[0] % self.world_size == 0
        ), f"Arrays passed should all have a first dimension multiple of {self.world_size}, found {arrays.shape[0]}."

        slice_len = arrays.shape[0] // self.world_size
        for i in range(self.world_size):
            if len(arrays.shape) == 1:
                storage[self._offsets[i] : self._offsets[i] + slice_len] = arrays[i * slice_len : (i + 1) * slice_len]
            else:
                # Expand the array on the fly if needed.
                if len(storage.shape) > 1 and storage.shape[1] < arrays.shape[1]:
                    storage = expand_like(storage, arrays.shape[1], padding_index=self.padding_index)
                storage[self._offsets[i] : self._offsets[i] + slice_len, : arrays.shape[1]] = arrays[
                    i * slice_len : (i + 1) * slice_len
                ]
        return slice_len, storage

    def finalize(self):
        """
        Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras
        to get each process a dataset of the same length).
        """
        if self._storage is None:
            return
        if self._offsets[0] != self.process_length:
            logger.warning("Not all data has been set. Are you sure you passed all values?")
        return nested_truncate(self._storage, self.num_samples)


@dataclass
class LabelSmoother:
    """
    Adds label-smoothing on a pre-computed output from a Transformers model.

    Args:
        epsilon (`float`, *optional*, defaults to 0.1):
            The label smoothing factor.
        ignore_index (`int`, *optional*, defaults to -100):
            The index in the labels to ignore when computing the loss.
    """

    epsilon: float = 0.1
    ignore_index: int = -100

    def __call__(self, model_output, labels, shift_labels=False):
        logits = model_output["logits"] if isinstance(model_output, dict) else model_output[0]
        if shift_labels:
            logits = logits[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()

        log_probs = -nn.functional.log_softmax(logits, dim=-1)
        if labels.dim() == log_probs.dim() - 1:
            labels = labels.unsqueeze(-1)

        padding_mask = labels.eq(self.ignore_index)
        # In case the ignore_index is -100, the gather will fail, so we replace labels by 0. The padding_mask
        # will ignore them in any case.
236 |
-
"""
|
237 |
-
Decorator to make all processes in distributed training wait for each local_master to do something.
|
238 |
-
|
239 |
-
Args:
|
240 |
-
local_rank (`int`): The rank of the local process.
|
241 |
-
"""
|
242 |
-
if local_rank not in [-1, 0]:
|
243 |
-
dist.barrier()
|
244 |
-
yield
|
245 |
-
if local_rank == 0:
|
246 |
-
dist.barrier()
|
247 |
-
|
248 |
-
|
249 |
-
class DistributedSamplerWithLoop(DistributedSampler):
|
250 |
-
"""
|
251 |
-
Like a torch.utils.data.distributed.DistributedSampler` but loops at the end back to the beginning of the shuffled
|
252 |
-
samples to make each process have a round multiple of batch_size samples.
|
253 |
-
|
254 |
-
Args:
|
255 |
-
dataset (`torch.utils.data.Dataset`):
|
256 |
-
Dataset used for sampling.
|
257 |
-
batch_size (`int`):
|
258 |
-
The batch size used with this sampler
|
259 |
-
kwargs:
|
260 |
-
All other keyword arguments passed to `DistributedSampler`.
|
261 |
-
"""
|
262 |
-
|
263 |
-
def __init__(self, dataset, batch_size, **kwargs):
|
264 |
-
super().__init__(dataset, **kwargs)
|
265 |
-
self.batch_size = batch_size
|
266 |
-
|
267 |
-
def __iter__(self):
|
268 |
-
indices = list(super().__iter__())
|
269 |
-
remainder = 0 if len(indices) % self.batch_size == 0 else self.batch_size - len(indices) % self.batch_size
|
270 |
-
# DistributedSampler already added samples from the beginning to make the number of samples a round multiple
|
271 |
-
# of the world size, so we skip those.
|
272 |
-
start_remainder = 1 if self.rank < len(self.dataset) % self.num_replicas else 0
|
273 |
-
indices += indices[start_remainder : start_remainder + remainder]
|
274 |
-
return iter(indices)
|
275 |
-
|
276 |
-
|
277 |
-
class SequentialDistributedSampler(Sampler):
|
278 |
-
"""
|
279 |
-
Distributed Sampler that subsamples indices sequentially, making it easier to collate all results at the end.
|
280 |
-
|
281 |
-
Even though we only use this sampler for eval and predict (no training), which means that the model params won't
|
282 |
-
have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add
|
283 |
-
extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather`
|
284 |
-
or `reduce` resulting tensors at the end of the loop.
|
285 |
-
"""
|
286 |
-
|
287 |
-
def __init__(self, dataset, num_replicas=None, rank=None, batch_size=None):
|
288 |
-
warnings.warn(
|
289 |
-
"SequentialDistributedSampler is deprecated and will be removed in v5 of Transformers.",
|
290 |
-
FutureWarning,
|
291 |
-
)
|
292 |
-
if num_replicas is None:
|
293 |
-
if not dist.is_available():
|
294 |
-
raise RuntimeError("Requires distributed package to be available")
|
295 |
-
num_replicas = dist.get_world_size()
|
296 |
-
if rank is None:
|
297 |
-
if not dist.is_available():
|
298 |
-
raise RuntimeError("Requires distributed package to be available")
|
299 |
-
rank = dist.get_rank()
|
300 |
-
self.dataset = dataset
|
301 |
-
self.num_replicas = num_replicas
|
302 |
-
self.rank = rank
|
303 |
-
num_samples = len(self.dataset)
|
304 |
-
# Add extra samples to make num_samples a multiple of batch_size if passed
|
305 |
-
if batch_size is not None:
|
306 |
-
self.num_samples = int(math.ceil(num_samples / (batch_size * num_replicas))) * batch_size
|
307 |
-
else:
|
308 |
-
self.num_samples = int(math.ceil(num_samples / num_replicas))
|
309 |
-
self.total_size = self.num_samples * self.num_replicas
|
310 |
-
self.batch_size = batch_size
|
311 |
-
|
312 |
-
def __iter__(self):
|
313 |
-
indices = list(range(len(self.dataset)))
|
314 |
-
|
315 |
-
# add extra samples to make it evenly divisible
|
316 |
-
indices += indices[: (self.total_size - len(indices))]
|
317 |
-
assert (
|
318 |
-
len(indices) == self.total_size
|
319 |
-
), f"Indices length {len(indices)} and total size {self.total_size} mismatched"
|
320 |
-
|
321 |
-
# subsample
|
322 |
-
indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples]
|
323 |
-
assert (
|
324 |
-
len(indices) == self.num_samples
|
325 |
-
), f"Indices length {len(indices)} and sample number {self.num_samples} mismatched"
|
326 |
-
|
327 |
-
return iter(indices)
|
328 |
-
|
329 |
-
def __len__(self):
|
330 |
-
return self.num_samples
|
331 |
-
|
332 |
-
|
333 |
-
def get_tpu_sampler(dataset: torch.utils.data.Dataset, batch_size: int):
|
334 |
-
if xm.xrt_world_size() <= 1:
|
335 |
-
return RandomSampler(dataset)
|
336 |
-
return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
|
337 |
-
|
338 |
-
|
339 |
-
def nested_new_like(arrays, num_samples, padding_index=-100):
|
340 |
-
"""Create the same nested structure as `arrays` with a first dimension always at `num_samples`."""
|
341 |
-
if isinstance(arrays, (list, tuple)):
|
342 |
-
return type(arrays)(nested_new_like(x, num_samples) for x in arrays)
|
343 |
-
return np.full_like(arrays, padding_index, shape=(num_samples, *arrays.shape[1:]))
|
344 |
-
|
345 |
-
|
346 |
-
def expand_like(arrays, new_seq_length, padding_index=-100):
|
347 |
-
"""Expand the `arrays` so that the second dimension grows to `new_seq_length`. Uses `padding_index` for padding."""
|
348 |
-
result = np.full_like(arrays, padding_index, shape=(arrays.shape[0], new_seq_length) + arrays.shape[2:])
|
349 |
-
result[:, : arrays.shape[1]] = arrays
|
350 |
-
return result
|
351 |
-
|
352 |
-
|
353 |
-
def nested_truncate(tensors, limit):
|
354 |
-
"Truncate `tensors` at `limit` (even if it's a nested list/tuple/dict of tensors)."
|
355 |
-
if isinstance(tensors, (list, tuple)):
|
356 |
-
return type(tensors)(nested_truncate(t, limit) for t in tensors)
|
357 |
-
if isinstance(tensors, Mapping):
|
358 |
-
return type(tensors)({k: nested_truncate(t, limit) for k, t in tensors.items()})
|
359 |
-
|
360 |
-
return tensors[:limit]
|
361 |
-
|
362 |
-
|
363 |
-
class DistributedTensorGatherer:
    """
    A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks.

    If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every
    step, our sampler will generate the following indices:

    `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]`

    to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and
    2 will be responsible for making predictions for the following samples:

    - P0: `[0, 1, 2, 3, 4, 5]`
    - P1: `[6, 7, 8, 9, 10, 11]`
    - P2: `[12, 13, 14, 15, 0, 1]`

    The first batch treated on each process will be

    - P0: `[0, 1]`
    - P1: `[6, 7]`
    - P2: `[12, 13]`

    So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to
    the following indices:

    `[0, 1, 6, 7, 12, 13]`

    If we directly concatenate our results without taking any precautions, the user will then get the predictions for
    the indices in this order at the end of the prediction loop:

    `[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]`

    For some reason, that's not going to float their boat. This class is there to solve that problem.

    Args:
        world_size (`int`):
            The number of processes used in the distributed training.
        num_samples (`int`):
            The number of samples in our dataset.
        make_multiple_of (`int`, *optional*):
            If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument
            (by adding samples).
        padding_index (`int`, *optional*, defaults to -100):
            The padding index to use if the arrays don't all have the same sequence length.
    """

    def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100):
        warnings.warn(
            "DistributedTensorGatherer is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.world_size = world_size
        self.num_samples = num_samples
        total_size = world_size if make_multiple_of is None else world_size * make_multiple_of
        self.total_samples = int(np.ceil(num_samples / total_size)) * total_size
        self.process_length = self.total_samples // world_size
        self._storage = None
        self._offsets = None
        self.padding_index = padding_index

    def add_arrays(self, arrays):
        """
        Add `arrays` to the internal storage. Will initialize the storage to the full size at the first arrays passed
        so that if we're bound to get an OOM, it happens at the beginning.
        """
        if arrays is None:
            return
        if self._storage is None:
            self._storage = nested_new_like(arrays, self.total_samples, padding_index=self.padding_index)
            self._offsets = list(range(0, self.total_samples, self.process_length))

        slice_len, self._storage = self._nested_set_tensors(self._storage, arrays)
        for i in range(self.world_size):
            self._offsets[i] += slice_len

    def _nested_set_tensors(self, storage, arrays):
        if isinstance(arrays, (list, tuple)):
            result = [self._nested_set_tensors(x, y) for x, y in zip(storage, arrays)]
            return result[0][0], type(arrays)(r[1] for r in result)
        assert (
            arrays.shape[0] % self.world_size == 0
        ), f"Arrays passed should all have a first dimension multiple of {self.world_size}, found {arrays.shape[0]}."

        slice_len = arrays.shape[0] // self.world_size
        for i in range(self.world_size):
            if len(arrays.shape) == 1:
                storage[self._offsets[i] : self._offsets[i] + slice_len] = arrays[i * slice_len : (i + 1) * slice_len]
            else:
                # Expand the array on the fly if needed.
                if len(storage.shape) > 1 and storage.shape[1] < arrays.shape[1]:
                    storage = expand_like(storage, arrays.shape[1], padding_index=self.padding_index)
                storage[self._offsets[i] : self._offsets[i] + slice_len, : arrays.shape[1]] = arrays[
                    i * slice_len : (i + 1) * slice_len
                ]
        return slice_len, storage

    def finalize(self):
        """
        Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras
        to get each process a dataset of the same length).
        """
        if self._storage is None:
            return
        if self._offsets[0] != self.process_length:
            logger.warning("Not all data has been set. Are you sure you passed all values?")
        return nested_truncate(self._storage, self.num_samples)

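The gatherer above ships in `transformers.trainer_pt_utils`. A minimal single-process sketch of how it is meant to be driven (in real distributed use, `add_arrays` would receive the result of an all-gather at every prediction step; the shapes here are illustrative):

```python
import numpy as np
from transformers.trainer_pt_utils import DistributedTensorGatherer

gatherer = DistributedTensorGatherer(world_size=1, num_samples=5)
gatherer.add_arrays(np.ones((3, 4)))   # first chunk of gathered predictions
gatherer.add_arrays(np.zeros((2, 4)))  # final chunk (would include any duplicated extras)
preds = gatherer.finalize()            # truncated back to num_samples rows
print(preds.shape)  # (5, 4)
```
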
@dataclass
class LabelSmoother:
    """
    Adds label-smoothing on a pre-computed output from a Transformers model.

    Args:
        epsilon (`float`, *optional*, defaults to 0.1):
            The label smoothing factor.
        ignore_index (`int`, *optional*, defaults to -100):
            The index in the labels to ignore when computing the loss.
    """

    epsilon: float = 0.1
    ignore_index: int = -100

    def __call__(self, model_output, labels, shift_labels=False):
        logits = model_output["logits"] if isinstance(model_output, dict) else model_output[0]
        if shift_labels:
            logits = logits[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()

        log_probs = -nn.functional.log_softmax(logits, dim=-1)
        if labels.dim() == log_probs.dim() - 1:
            labels = labels.unsqueeze(-1)

        padding_mask = labels.eq(self.ignore_index)
        # In case the ignore_index is -100, the gather will fail, so we replace labels by 0. The padding_mask
        # will ignore them in any case.
        labels = torch.clamp(labels, min=0)
        nll_loss = log_probs.gather(dim=-1, index=labels)
        # works for fp16 input tensor too, by internally upcasting it to fp32
        smoothed_loss = log_probs.sum(dim=-1, keepdim=True, dtype=torch.float32)

        nll_loss.masked_fill_(padding_mask, 0.0)
        smoothed_loss.masked_fill_(padding_mask, 0.0)

        # Take the mean over the label dimensions, then divide by the number of active elements (i.e. not-padded):
        num_active_elements = padding_mask.numel() - padding_mask.long().sum()
        nll_loss = nll_loss.sum() / num_active_elements
        smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1])
        return (1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss

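A quick sketch of applying the smoother to a toy model output (the values are random; the `-100` position is excluded through `padding_mask`):

```python
import torch
from transformers.trainer_pt_utils import LabelSmoother

smoother = LabelSmoother(epsilon=0.1)
logits = torch.randn(2, 5, 10)          # (batch, seq, vocab)
labels = torch.randint(0, 10, (2, 5))
labels[0, -1] = -100                    # one padded position, excluded from the loss
loss = smoother({"logits": logits}, labels)
print(loss.item())
```
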
def get_length_grouped_indices(lengths, batch_size, mega_batch_mult=None, generator=None):
    """
    Return a list of indices so that each slice of `batch_size` consecutive indices correspond to elements of similar
    lengths. To do this, the indices are:

    - randomly permuted
    - grouped in mega-batches of size `mega_batch_mult * batch_size`
    - sorted by length in each mega-batch

    The result is the concatenation of all mega-batches, with the batch of `batch_size` containing the element of
    maximum length placed first, so that an OOM happens sooner rather than later.
    """
    # Default for mega_batch_mult: 50 or the number to get 4 megabatches, whichever is smaller.
    if mega_batch_mult is None:
        mega_batch_mult = min(len(lengths) // (batch_size * 4), 50)
        # Just in case, for tiny datasets
        if mega_batch_mult == 0:
            mega_batch_mult = 1

    # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
    indices = torch.randperm(len(lengths), generator=generator)
    megabatch_size = mega_batch_mult * batch_size
    megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)]
    megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches]

    # The rest is to get the biggest batch first.
    # Since each megabatch is sorted by descending length, the longest element is the first
    megabatch_maximums = [lengths[megabatch[0]] for megabatch in megabatches]
    max_idx = torch.argmax(torch.tensor(megabatch_maximums)).item()
    # Switch to put the longest element in first position
    megabatches[0][0], megabatches[max_idx][0] = megabatches[max_idx][0], megabatches[0][0]

    return [i for megabatch in megabatches for i in megabatch]

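A worked sketch of the grouping: 8 sequence lengths, mega-batches of `2 * batch_size = 4`, each sorted descending, with the single longest element swapped to the front so an OOM would surface in the very first batch (the exact grouping depends on the random permutation):

```python
import torch
from transformers.trainer_pt_utils import get_length_grouped_indices

lengths = [3, 9, 1, 7, 5, 2, 8, 4]
g = torch.Generator().manual_seed(0)
order = get_length_grouped_indices(lengths, batch_size=2, mega_batch_mult=2, generator=g)
print([lengths[i] for i in order])  # two descending runs of 4, with the global max first
```
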
class LengthGroupedSampler(Sampler):
    r"""
    Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while
    keeping a bit of randomness.
    """

    def __init__(
        self,
        batch_size: int,
        dataset: Optional[Dataset] = None,
        lengths: Optional[List[int]] = None,
        model_input_name: Optional[str] = None,
        generator=None,
    ):
        if dataset is None and lengths is None:
            raise ValueError("One of dataset and lengths must be provided.")

        self.batch_size = batch_size
        if lengths is None:
            model_input_name = model_input_name if model_input_name is not None else "input_ids"
            if (
                not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding))
                or model_input_name not in dataset[0]
            ):
                raise ValueError(
                    "Can only automatically infer lengths for datasets whose items are dictionaries with an "
                    f"'{model_input_name}' key."
                )
            lengths = [len(feature[model_input_name]) for feature in dataset]
        elif isinstance(lengths, torch.Tensor):
            logger.info(
                "If lengths is a torch.Tensor, LengthGroupedSampler will be slow. Converting lengths to List[int]..."
            )
            lengths = lengths.tolist()

        self.lengths = lengths
        self.generator = generator

    def __len__(self):
        return len(self.lengths)

    def __iter__(self):
        indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=self.generator)
        return iter(indices)

class DistributedLengthGroupedSampler(DistributedSampler):
    r"""
    Distributed Sampler that samples indices in a way that groups together features of the dataset of roughly the same
    length while keeping a bit of randomness.
    """

    # Copied and adapted from PyTorch DistributedSampler.
    def __init__(
        self,
        batch_size: int,
        dataset: Optional[Dataset] = None,
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
        seed: int = 0,
        drop_last: bool = False,
        lengths: Optional[List[int]] = None,
        model_input_name: Optional[str] = None,
    ):
        if dataset is None and lengths is None:
            raise ValueError("One of dataset and lengths must be provided.")
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()

        self.batch_size = batch_size
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.drop_last = drop_last

        if lengths is None:
            model_input_name = model_input_name if model_input_name is not None else "input_ids"
            if (
                not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding))
                or model_input_name not in dataset[0]
            ):
                raise ValueError(
                    "Can only automatically infer lengths for datasets whose items are dictionaries with an "
                    f"'{model_input_name}' key."
                )
            lengths = [len(feature[model_input_name]) for feature in dataset]
        elif isinstance(lengths, torch.Tensor):
            logger.info(
                "If lengths is a torch.Tensor, DistributedLengthGroupedSampler will be slow. Converting lengths to"
                " List[int]..."
            )
            lengths = lengths.tolist()

        self.lengths = lengths

        # If the dataset length is evenly divisible by # of replicas, then there
        # is no need to drop any data, since the dataset will be split equally.
        if self.drop_last and len(self.lengths) % self.num_replicas != 0:
            # Split to nearest available length that is evenly divisible.
            # This is to ensure each rank receives the same amount of data when
            # using this Sampler.
            self.num_samples = math.ceil((len(self.lengths) - self.num_replicas) / self.num_replicas)
        else:
            self.num_samples = math.ceil(len(self.lengths) / self.num_replicas)
        self.total_size = self.num_samples * self.num_replicas
        self.seed = seed

    def __iter__(self) -> Iterator:
        # Deterministically shuffle based on epoch and seed
        g = torch.Generator()
        g.manual_seed(self.seed + self.epoch)
        indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=g)

        if not self.drop_last:
            # add extra samples to make it evenly divisible
            indices += indices[: (self.total_size - len(indices))]
        else:
            # remove tail of data to make it evenly divisible.
            indices = indices[: self.total_size]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank : self.total_size : self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)

class ShardSampler(Sampler):
    """
    Sampler that shards batches between several processes. Dispatches indices batch by batch: on 2 processes with batch
    size 4, the first two batches are `[0, 1, 2, 3, 4, 5, 6, 7]` and `[8, 9, 10, 11, 12, 13, 14, 15]`, which shard into
    `[0, 1, 2, 3]` and `[8, 9, 10, 11]` for GPU-0 and `[4, 5, 6, 7]` and `[12, 13, 14, 15]` for GPU-1.

    The sampler thus yields `[0, 1, 2, 3, 8, 9, 10, 11]` on GPU-0 and `[4, 5, 6, 7, 12, 13, 14, 15]` on GPU-1.
    """

    def __init__(
        self,
        dataset: Dataset,
        batch_size: int = 1,
        drop_last: bool = False,
        num_processes: int = 1,
        process_index: int = 0,
    ):
        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.num_processes = num_processes
        self.process_index = process_index

        self.total_batch_size = total_batch_size = batch_size * num_processes

        num_batches = len(dataset) // total_batch_size if drop_last else math.ceil(len(dataset) / total_batch_size)
        self.total_num_samples = num_batches * total_batch_size

    def __iter__(self):
        indices = list(range(len(self.dataset)))

        # Add extra samples to make it evenly divisible. While loop is there in the edge case we have a tiny dataset
        # and it needs to be done several times.
        while len(indices) < self.total_num_samples:
            indices += indices[: (self.total_num_samples - len(indices))]

        result = []
        for batch_start in range(self.batch_size * self.process_index, self.total_num_samples, self.total_batch_size):
            result += indices[batch_start : batch_start + self.batch_size]

        return iter(result)

    def __len__(self):
        # Each shard only sees a fraction of total_num_samples.
        return self.total_num_samples // self.num_processes

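A tiny sketch reproducing the docstring's example above, 16 samples with batch size 4 on two processes:

```python
from transformers.trainer_pt_utils import ShardSampler

dataset = list(range(16))
for rank in range(2):
    sampler = ShardSampler(dataset, batch_size=4, num_processes=2, process_index=rank)
    print(rank, list(sampler))
# 0 [0, 1, 2, 3, 8, 9, 10, 11]
# 1 [4, 5, 6, 7, 12, 13, 14, 15]
```
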
class IterableDatasetShard(IterableDataset):
    """
    Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
    always yield a number of samples that is a round multiple of the actual batch size (which is `batch_size x
    num_processes`). Depending on the value of the `drop_last` attribute, it will either stop the iteration at the
    first batch that would be too small or loop with indices from the beginning.

    On two processes with an iterable dataset yielding `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]` with a batch size of
    2:

    - the shard on process 0 will yield `[0, 1, 4, 5, 8, 9]` so will see batches `[0, 1]`, `[4, 5]`, `[8, 9]`
    - the shard on process 1 will yield `[2, 3, 6, 7, 10, 11]` so will see batches `[2, 3]`, `[6, 7]`, `[10, 11]`

    <Tip warning={true}>

    If your IterableDataset implements some randomization that needs to be applied the same way on all processes
    (for instance, a shuffling), you should use a `torch.Generator` in a `generator` attribute of the `dataset` to
    generate your random numbers and call the [`~trainer_pt_utils.IterableDatasetShard.set_epoch`] method of this
    object. It will set the seed of this `generator` to `seed + epoch` on all processes before starting the
    iteration. Alternatively, you can also implement a `set_epoch()` method in your iterable dataset to deal with
    this.

    </Tip>

    Args:
        dataset (`torch.utils.data.IterableDataset`):
            The batch sampler to split in several shards.
        batch_size (`int`, *optional*, defaults to 1):
            The size of the batches per shard.
        drop_last (`bool`, *optional*, defaults to `False`):
            Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
            beginning.
        num_processes (`int`, *optional*, defaults to 1):
            The number of processes running concurrently.
        process_index (`int`, *optional*, defaults to 0):
            The index of the current process.
        seed (`int`, *optional*, defaults to 0):
            A random seed that will be used for the random number generation in
            [`~trainer_pt_utils.IterableDatasetShard.set_epoch`].
    """

    def __init__(
        self,
        dataset: IterableDataset,
        batch_size: int = 1,
        drop_last: bool = False,
        num_processes: int = 1,
        process_index: int = 0,
        seed: int = 0,
    ):
        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.num_processes = num_processes
        self.process_index = process_index
        self.seed = seed
        self.epoch = 0
        self.num_examples = 0

    def set_epoch(self, epoch):
        self.epoch = epoch
        if hasattr(self.dataset, "set_epoch"):
            self.dataset.set_epoch(epoch)

    def __iter__(self):
        self.num_examples = 0
        if (
            not hasattr(self.dataset, "set_epoch")
            and hasattr(self.dataset, "generator")
            and isinstance(self.dataset.generator, torch.Generator)
        ):
            self.dataset.generator.manual_seed(self.seed + self.epoch)
        real_batch_size = self.batch_size * self.num_processes
        process_slice = range(self.process_index * self.batch_size, (self.process_index + 1) * self.batch_size)

        first_batch = None
        current_batch = []
        for element in self.dataset:
            self.num_examples += 1
            current_batch.append(element)
            # Wait to have a full batch before yielding elements.
            if len(current_batch) == real_batch_size:
                for i in process_slice:
                    yield current_batch[i]
                if first_batch is None:
                    first_batch = current_batch.copy()
                current_batch = []

        # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
        if not self.drop_last and len(current_batch) > 0:
            if first_batch is None:
                first_batch = current_batch.copy()
            while len(current_batch) < real_batch_size:
                current_batch += first_batch
            for i in process_slice:
                yield current_batch[i]

    def __len__(self):
        # Will raise an error if the underlying dataset is not sized.
        if self.drop_last:
            return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
        else:
            return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size

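And a sketch of the sharding behaviour the docstring describes, 12 items with batch size 2 on two processes:

```python
from torch.utils.data import IterableDataset
from transformers.trainer_pt_utils import IterableDatasetShard

class Twelve(IterableDataset):
    def __iter__(self):
        return iter(range(12))

for rank in range(2):
    shard = IterableDatasetShard(Twelve(), batch_size=2, num_processes=2, process_index=rank)
    print(rank, list(shard))
# 0 [0, 1, 4, 5, 8, 9]
# 1 [2, 3, 6, 7, 10, 11]
```
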
# In order to keep `trainer.py` compact and easy to understand, place any secondary PT Trainer
# helper methods here


def _get_learning_rate(self):
    if self.deepspeed:
        # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may
        # not run for the first few dozen steps while the loss scale is too large, so `get_last_lr`
        # will fail if called during that warm-up stage; work around it:
        try:
            last_lr = self.lr_scheduler.get_last_lr()[0]
        except AssertionError as e:
            if "need to call step" in str(e):
                logger.warning("tried to get lr value before scheduler/optimizer started stepping, returning lr=0")
                last_lr = 0
            else:
                raise
    else:
        last_lr = self.lr_scheduler.get_last_lr()[0]
        if torch.is_tensor(last_lr):
            last_lr = last_lr.item()
    return last_lr


def _secs2timedelta(secs):
    """
    Convert seconds to a `hh:mm:ss.xx` string, keeping two digits of the fractional second.
    """

    msec = int(abs(secs - int(secs)) * 100)
    return f"{datetime.timedelta(seconds=int(secs))}.{msec:02d}"


def metrics_format(self, metrics: Dict[str, float]) -> Dict[str, float]:
    """
    Reformat Trainer metrics values to a human-readable format

    Args:
        metrics (`Dict[str, float]`):
            The metrics returned from train/evaluate/predict

    Returns:
        metrics (`Dict[str, float]`): The reformatted metrics
    """

    metrics_copy = metrics.copy()
    for k, v in metrics_copy.items():
        if "_mem_" in k:
            metrics_copy[k] = f"{ v >> 20 }MB"
        elif "_runtime" in k:
            metrics_copy[k] = _secs2timedelta(v)
        elif k == "total_flos":
            metrics_copy[k] = f"{ int(v) >> 30 }GF"
        elif isinstance(metrics_copy[k], float):
            metrics_copy[k] = round(v, 4)

    return metrics_copy

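For reference, here is what the two formatting helpers above produce on some made-up values, worked by hand:

```python
_secs2timedelta(3725.256)   # -> "1:02:05.25"
f"{1430257664 >> 20}MB"     # -> "1364MB" (the `_mem_` branch: bytes shifted down to MiB)
f"{int(3.2e12) >> 30}GF"    # -> "2980GF" (the `total_flos` branch: flos shifted down to gigaflos)
```
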
def log_metrics(self, split, metrics):
    """
    Log metrics in a specially formatted way

    Under distributed environment this is done only for a process with rank 0.

    Args:
        split (`str`):
            Mode/split name: one of `train`, `eval`, `test`
        metrics (`Dict[str, float]`):
            The metrics returned from train/evaluate/predict

    Notes on memory reports:

    In order to get memory usage report you need to install `psutil`. You can do that with `pip install psutil`.

    Now when this method is run, you will see a report that will include:

    ```
    init_mem_cpu_alloc_delta   =     1301MB
    init_mem_cpu_peaked_delta  =      154MB
    init_mem_gpu_alloc_delta   =      230MB
    init_mem_gpu_peaked_delta  =        0MB
    train_mem_cpu_alloc_delta  =     1345MB
    train_mem_cpu_peaked_delta =        0MB
    train_mem_gpu_alloc_delta  =      693MB
    train_mem_gpu_peaked_delta =        7MB
    ```

    **Understanding the reports:**

    - the first segment, e.g., `train_`, tells you which stage the metrics are for. Reports starting with `init_`
      will be added to the first stage that gets run. So that if only evaluation is run, the memory usage for the
      `__init__` will be reported along with the `eval_` metrics.
    - the third segment is either `cpu` or `gpu` and tells you whether it's the general RAM or the gpu0 memory
      metric.
    - `*_alloc_delta` - is the difference in the used/allocated memory counter between the end and the start of the
      stage - it can be negative if a function released more memory than it allocated.
    - `*_peaked_delta` - is any extra memory that was consumed and then freed - relative to the current allocated
      memory counter - it is never negative. When you look at the metrics of any stage you add up `alloc_delta` +
      `peaked_delta` and you know how much memory was needed to complete that stage.

    The reporting happens only for the process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since
    the main process does the bulk of the work, but that may not hold if model parallelism is used, in which case other
    GPUs may use a different amount of gpu memory. This is also not the same under DataParallel, where gpu0 may require
    much more memory than the rest since it stores the gradient and optimizer states for all participating GPUs.
    Perhaps in the future these reports will evolve to measure those too.

    The CPU RAM metric measures RSS (Resident Set Size), which includes both the memory unique to the process and the
    memory shared with other processes. It is important to note that it does not include swapped-out memory, so the
    reports could be imprecise.

    The CPU peak memory is measured using a sampling thread. Due to python's GIL it may miss some of the peak memory if
    that thread didn't get a chance to run when the highest memory was used. Therefore this report can understate
    reality. Using `tracemalloc` would have reported the exact peak memory, but it doesn't report memory allocations
    outside of python. So if some C++ CUDA extension allocated its own memory it won't be reported. And therefore it
    was dropped in favor of the memory sampling approach, which reads the current process memory usage.

    The GPU allocated and peak memory reporting is done with `torch.cuda.memory_allocated()` and
    `torch.cuda.max_memory_allocated()`. This metric reports only "deltas" for pytorch-specific allocations, as the
    `torch.cuda` memory management system doesn't track any memory allocated outside of pytorch. For example, the very
    first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory.

    Note that this tracker doesn't account for memory allocations outside of [`Trainer`]'s `__init__`, `train`,
    `evaluate` and `predict` calls.

    Because `evaluation` calls may happen during `train`, we can't handle nested invocations because
    `torch.cuda.max_memory_allocated` is a single counter, so if it gets reset by a nested eval call, `train`'s tracker
    will report incorrect info. If this [pytorch issue](https://github.com/pytorch/pytorch/issues/16266) gets resolved
    it will be possible to change this class to be re-entrant. Until then we will only track the outer level of the
    `train`, `evaluate` and `predict` methods, which means that if `eval` is called during `train`, it's the latter
    that will account for its memory usage and that of the former.

    For best performance you may want to consider turning the memory profiling off for production runs.
    """
    if not self.is_world_process_zero():
        return

    print(f"***** {split} metrics *****")
    metrics_formatted = self.metrics_format(metrics)
    k_width = max(len(str(x)) for x in metrics_formatted.keys())
    v_width = max(len(str(x)) for x in metrics_formatted.values())
    for key in sorted(metrics_formatted.keys()):
        print(f"  {key: <{k_width}} = {metrics_formatted[key]:>{v_width}}")

def save_metrics(self, split, metrics, combined=True):
    """
    Save metrics into a json file for that split, e.g. `train_results.json`.

    Under distributed environment this is done only for a process with rank 0.

    Args:
        split (`str`):
            Mode/split name: one of `train`, `eval`, `test`, `all`
        metrics (`Dict[str, float]`):
            The metrics returned from train/evaluate/predict
        combined (`bool`, *optional*, defaults to `True`):
            Creates combined metrics by updating `all_results.json` with metrics of this call

    To understand the metrics please read the docstring of [`~Trainer.log_metrics`]. The only difference is that raw
    unformatted numbers are saved in the current method.

    """
    if not self.is_world_process_zero():
        return

    path = os.path.join(self.args.output_dir, f"{split}_results.json")
    with open(path, "w") as f:
        json.dump(metrics, f, indent=4, sort_keys=True)

    if combined:
        path = os.path.join(self.args.output_dir, "all_results.json")
        if os.path.exists(path):
            with open(path, "r") as f:
                all_metrics = json.load(f)
        else:
            all_metrics = {}

        all_metrics.update(metrics)
        with open(path, "w") as f:
            json.dump(all_metrics, f, indent=4, sort_keys=True)


def save_state(self):
    """
    Saves the Trainer state, since Trainer.save_model saves only the tokenizer with the model.

    Under distributed environment this is done only for a process with rank 0.
    """
    if not self.is_world_process_zero():
        return

    path = os.path.join(self.args.output_dir, "trainer_state.json")
    self.state.save_to_json(path)

def get_parameter_names(model, forbidden_layer_types):
    """
    Returns the names of the model parameters that are not inside a forbidden layer.
    """
    result = []
    for name, child in model.named_children():
        result += [
            f"{name}.{n}"
            for n in get_parameter_names(child, forbidden_layer_types)
            if not isinstance(child, tuple(forbidden_layer_types))
        ]
    # Add model specific parameters (defined with nn.Parameter) since they are not in any child.
    result += list(model._parameters.keys())
    return result


def get_module_class_from_name(module, name):
    """
    Gets a class from a module by its name.

    Args:
        module (`torch.nn.Module`): The module to get the class from.
        name (`str`): The name of the class.
    """
    modules_children = list(module.children())
    if module.__class__.__name__ == name:
        return module.__class__
    elif len(modules_children) == 0:
        return
    else:
        for child_module in modules_children:
            module_class = get_module_class_from_name(child_module, name)
            if module_class is not None:
                return module_class

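A common use of `get_parameter_names`, sketched here, is to exclude normalization layers and biases from weight decay when building optimizer parameter groups (the tiny model is illustrative):

```python
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
decay_params = [n for n in get_parameter_names(model, [nn.LayerNorm]) if "bias" not in n]
print(decay_params)  # ['0.weight'] -- LayerNorm parameters and biases are left out
```
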
if is_sagemaker_mp_enabled():
    import smdistributed.modelparallel.torch as smp

    @smp.step()
    def smp_forward_backward(model, inputs, gradient_accumulation_steps=1):
        outputs = model(**inputs)
        loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
        loss /= gradient_accumulation_steps
        model.backward(loss)
        return loss

    @smp.step()
    def smp_forward_only(model, inputs):
        return model(**inputs)

    def smp_gather(tensor):
        if isinstance(tensor, (list, tuple)):
            return type(tensor)(smp_gather(t) for t in tensor)
        elif isinstance(tensor, dict):
            return type(tensor)({k: smp_gather(v) for k, v in tensor.items()})
        elif not isinstance(tensor, torch.Tensor):
            raise TypeError(
                f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
            )
        all_tensors = smp.allgather(tensor, smp.CommGroup.DP_GROUP)
        all_tensors = [atleast_1d(t) for t in all_tensors]
        return torch.cat([t.cpu() for t in all_tensors], dim=0)

    def smp_nested_concat(tensor):
        if isinstance(tensor, (list, tuple)):
            return type(tensor)(smp_nested_concat(t) for t in tensor)
        elif isinstance(tensor, dict):
            return type(tensor)({k: smp_nested_concat(v) for k, v in tensor.items()})
        # It doesn't seem possible to check here if `tensor` is a StepOutput because StepOutput lives in `smp.step`
        # which is also the name of the decorator so Python is confused.
        return tensor.concat().detach().cpu()

spaces/CognitiveLabs/GPT-4-Vision-Chat/Dockerfile
DELETED
@@ -1,13 +0,0 @@
FROM python:3.9
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app
COPY --chown=user . $HOME/app
RUN chown -R user:user $HOME/app
RUN chmod -R 755 $HOME/app
COPY ./requirements.txt $HOME/app/requirements.txt
RUN pip install -r requirements.txt
COPY . .
CMD ["chainlit", "run", "app.py", "--port", "7860"]

spaces/CohereForAI/pokemon-cards-explorer/README.md
DELETED
@@ -1,126 +0,0 @@
---
title: Pokemon Cards Explorer
emoji: 🔍
colorFrom: blue
colorTo: purple
sdk: streamlit
sdk_version: 1.26.0
app_file: ./src/app.py
pinned: false
---

# [Pokemon Card Explorer](https://pokemoncards.streamlit.app/)

A simple semantic vector search engine over all **13000+ trading cards** ever to be released by Niantic, using a very straightforward stack including **Pinecone** (for Vector Database), **OpenAI** (for embeddings), **Cohere** (for Re-ranking) and **Streamlit** (for deployment).

Data augmentation via web-scraping was done to improve the search accuracy. Web-scraping was done using **requests** and **BS4**.

# Motivation 🤔

Why? 'cause WHY NOT!

Any pokemon fan would agree 😌

# Implementation 🛠️

The entire implementation can be divided into the following parts:

- Data Preparation Step
- Data Ingestion Step
- Query Step

## Data Preparation Step

The original [Pokemon Cards dataset](https://huggingface.co/datasets/TheFusion21/PokemonCards) is available on HuggingFace (uploaded by TheFusion21 💙) and has 13.1K rows, containing the following information:

```json
{
  "id": ... ,
  "image_url" : ... ,
  "caption" : ... ,
  "name" : ... ,
  "hp" : ... ,
  "set_name" : ...
}
```

The ideal candidate to be converted to embeddings would be `name + caption`, which is what I did in `version 1`, but I noticed that it sometimes made errors -- it wasn't able to identify pokemon accurately from a description and needed longer descriptions for better accuracy.

The data doesn't describe what the pokemon look like, which is what the average user will end up querying. So the conclusion was that the data needed to be augmented.

I used [PokemonDB](https://pokemondb.net/) pages of individual pokemon, and extracted data and images of the pokemon to create a supplementary dataset. All of this was done using **BS4** and **requests**.

Further information on what the pokemon look like was extracted by using BLIP to caption the pokemon images pulled from PokemonDB.

The entire pipeline can be visualized through the diagram below.

The final supplemented data, a.k.a Pokemon Cards++, had the following fields:

```json
{
  "id": ... ,
  "card_image_url" : ... ,
  "caption" : ... ,
  "name" : ... ,
  "hp" : ... ,
  "set_name" : ...,
  "poke_image_url" : ... ,
  "poke_image_caption" : ... ,
  "pokedex_entries" : ... ,
  "pokedb_intro_text" : ...
}
```

And the final text used for generating the embeddings was `name + caption + poke_image_caption + pokedb_intro_text + pokedex_entries`, which allowed a more holistic embedding to be generated for each pokemon.

## Data Ingestion Step

Once the embeddings for all the data have been created, you need to put them in a vector database for quick semantic similarity search (using HNSW or another approximate nearest-neighbor algorithm). I used Pinecone for this step, which made it really easy to do. A code sketch follows the diagram below.

Essentially, this can be summarized by the diagram below.

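Below is a minimal sketch of what this step might look like with the classic `openai` and `pinecone-client` Python APIs. The index name, the `cards` iterable and the metadata fields are illustrative, not the exact code used in this Space:

```python
import openai
import pinecone

pinecone.init(api_key="...", environment="...")
index = pinecone.Index("pokemon-cards")  # hypothetical index name

def embed(text: str) -> list:
    resp = openai.Embedding.create(input=[text], model="text-embedding-ada-002")
    return resp["data"][0]["embedding"]

for card in cards:  # `cards`: the Pokemon Cards++ records described above (hypothetical variable)
    text = " ".join([card["name"], card["caption"], card["poke_image_caption"],
                     card["pokedb_intro_text"], card["pokedex_entries"]])
    index.upsert(vectors=[(card["id"], embed(text),
                           {"caption": card["caption"], "card_image_url": card["card_image_url"]})])
```
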
## Query Step

In the query step, the user-provided question is passed into the **same** embedding model that was used during ingestion, and the resulting query embedding is sent to the vector DB for semantic search against the Card Embeddings. The DB returns the K nearest matches for the query embedding. Those k-nearest matches are then sent to a re-ranker model, which ranks each match against the query by relevancy and gives us our final output: ranked Pokemon Cards! A code sketch follows the diagram below.

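And a matching sketch of the query step, under the same assumptions as the ingestion sketch (it reuses the hypothetical `index` and `embed` from above; the re-rank call follows the shape of Cohere's rerank API, so treat the exact response fields as an assumption):

```python
import cohere

co = cohere.Client("...")  # Cohere API key

def search(query: str, k: int = 10):
    # Same embedding model as ingestion, then approximate nearest-neighbor search.
    matches = index.query(vector=embed(query), top_k=k, include_metadata=True)["matches"]
    docs = [m["metadata"]["caption"] for m in matches]
    # Re-rank the K candidates against the query by relevance.
    reranked = co.rerank(query=query, documents=docs, top_n=k)
    return [matches[r.index] for r in reranked.results]
```
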
## That's all Folks!

# FAQ

## How much does it cost to run and maintain this site?
Glad you asked! It costs me nothing to keep the Pinecone Vector DB running (but it might shut down in a few days if not queried), and Cohere's reranking API is free. OpenAI charges me per token, but the value is quite affordable: it cost me about $2 to get embeddings for the entire dataset. So this entire project just costs me $2 and about 3 days of time.

## The site is down with an error, why is it not running?
Probably because Pinecone deleted the index, which means that I would have to re-upload the embeddings to Pinecone again. Pinecone deletes indices that haven't been used in a week under the free version.

## You're so awesome, how can I be like you?
You can't. Sorry.

# Acknowledgements

Thank you to **Pokemon** for making my childhood special! 💙

spaces/CompVis/text2img-latent-diffusion/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: LDM Text-to-image
emoji: 🧨
colorFrom: yellow
colorTo: green
sdk: gradio
sdk_version: 3.15.0
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/DAMO-NLP-SG/CLEX-Chat/clex_layer.py
DELETED
@@ -1,141 +0,0 @@
import torch
import torch.nn as nn
from torchdiffeq import odeint

import math


# Neural ODE (CLEX): models how the (log) RoPE inverse frequencies evolve with the length-scaling factor t.
class ODELinear(nn.Module):
    def __init__(
        self,
        dim: int,
        factor,
        **kwargs
    ):
        super().__init__()
        self.ode_up_proj = nn.Parameter(torch.empty(dim // 2, factor * dim).to(torch.float32))
        self.ode_down_proj = nn.Parameter(torch.empty(factor * dim, dim // 2).to(torch.float32))
        self.dim = dim
        self.act = torch.nn.SiLU()
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.kaiming_uniform_(self.ode_up_proj, a=math.sqrt(5))
        nn.init.zeros_(self.ode_down_proj)

    def get_time_embedding(self, t, base=10000, device='cuda', dtype=torch.float32):
        if t < 1:
            alpha = 1
        else:
            alpha = 2 * t - 1
        ntk_base = base * alpha ** (self.dim / (self.dim - 2))
        ntk_inv_freq = 1.0 / (ntk_base ** (torch.arange(0, self.dim, 2, dtype=torch.float32).to(device) / self.dim))
        index = torch.arange(0, self.dim, 2, dtype=torch.float32).to(device)
        delta_ntk_freq = -2 * index / (self.dim - 2) * 1 / (base ** (index / self.dim) * (alpha ** (index / (self.dim - 2) + 1)))
        return delta_ntk_freq.to(device, dtype=dtype), ntk_inv_freq.to(device, dtype=dtype)

    def forward(self, t, x: torch.Tensor):
        delta_time, time = self.get_time_embedding(t, device=x.device, dtype=x.dtype)
        x = x + torch.log(time)
        time_embed = delta_time / time
        delta_inv_freq = self.act(x @ self.ode_up_proj.float()) @ self.ode_down_proj.float() + time_embed
        return delta_inv_freq


class LlamaCLEXScalingRotaryEmbedding(nn.Module):

    def __init__(self, dim, max_position_embeddings=2048, rope_scaling=None, base=10000, device=None) -> None:
        super().__init__()

        self.max_t = rope_scaling["max_factor"]
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq)

        self.proj_func = ODELinear(dim, rope_scaling["param_factor"])
        self.rope_cached = None
        self.max_t_cached = 0
        self.freq_cached = None
        self.time_dt = 0.01
        self.ode_args = {
            "method": "rk4",
            "options": {"step_size": self.time_dt},
        }

    def sample_random_times(self, max_t, device):
        return torch.randint(2, max_t, (1,), dtype=torch.long, device=device)

    def get_random_position_ids(self, n=2048, max=8192):
        positions = torch.randperm(max)[:n].sort().values
        # positions = positions.to(device=device)
        return positions

    def get_continuous_freq(self, time_grid, ex_positions, device):
        # Integrate the frequency ODE from t=1 to the target scaling factor(s).
        solution = odeint(
            self.proj_func, torch.log(self.inv_freq.to(device, dtype=torch.float32)), time_grid, **self.ode_args
        )
        if time_grid.size(0) == 2:
            # training: use the ODE solution at the sampled t to build the embedding
            scale_inv_freq = torch.exp(solution[1])
            # print(time_grid[1].tolist(), torch.sum(scale_inv_freq).tolist(), torch.sum(self.proj_func.ode_down_proj).tolist())
            freqs = torch.outer(ex_positions.float().squeeze(), scale_inv_freq)
        else:
            # inference: return the whole grid of scaled inverse frequencies for caching
            scale_inv_freq = torch.exp(solution)
            # freqs = torch.einsum('i, kl -> kil', ex_positions, scale_inv_freq)
            return scale_inv_freq
        embed = torch.cat((freqs, freqs), dim=-1)
        return embed

    def forward(self, device, dtype, seq_len, do_train=False):
        device = self.proj_func.ode_up_proj.device
        scale_factor = seq_len // self.max_position_embeddings
        if do_train:
            t_val = self.sample_random_times(self.max_t + 1, device)[0]
            sampled_position_ids = self.get_random_position_ids(n=seq_len - 2, max=seq_len * t_val - 2).float()
            ex_positions = torch.cat([
                torch.tensor([0]),
                (sampled_position_ids + 1) / scale_factor,
                torch.tensor([seq_len * t_val // scale_factor - 1])]
            ).to(device, dtype=torch.float32)
        else:
            t_val = scale_factor if seq_len % self.max_position_embeddings == 0.0 else scale_factor + 1
            t_val = t_val if t_val <= self.max_t else self.max_t
            ex_positions = torch.arange(0, self.max_position_embeddings * t_val, dtype=torch.float32).to(device)

        if t_val == 1.0:
            # No scaling needed: plain RoPE frequencies.
            scale_inv_freq = self.inv_freq.to(device)
            freqs = torch.outer(ex_positions.float().squeeze(), scale_inv_freq)
            embed = torch.cat((freqs, freqs), dim=-1)
            cos, sin = embed.cos()[None, None, :, :], embed.sin()[None, None, :, :]
        elif do_train:
            time_grid = torch.tensor([1.0, t_val]).float().to(device)
            embed = self.get_continuous_freq(time_grid, ex_positions, device)
            cos, sin = embed.cos()[None, None, :, :], embed.sin()[None, None, :, :]
        else:
            # Inference path: compute the full frequency grid once, then cache per t_val.
            if t_val > self.max_t_cached:
                if self.freq_cached is None:
                    time_grid = torch.arange(1.0, self.max_t, dtype=torch.float32).to(device)
                    self.freq_cached = self.get_continuous_freq(time_grid, ex_positions, device)
                scale_inv_freq = self.freq_cached[int(t_val - 1.0)]
                freqs = torch.outer(ex_positions.float().squeeze(), scale_inv_freq)
                embed = torch.cat((freqs, freqs), dim=-1)
                self.rope_cached = torch.cat((embed.cos()[None, None, None, :, :], embed.sin()[None, None, None, :, :]), dim=0)
                self.max_t_cached = t_val
            cos, sin = self.rope_cached

        return torch.cat(
            (cos[None, :, :, :seq_len, ...].to(dtype=dtype),
             sin[None, :, :, :seq_len, ...].to(dtype=dtype)),
            dim=0
        )

spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImagePalette.py
DELETED
@@ -1,266 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# image palette object
#
# History:
# 1996-03-11 fl   Rewritten.
# 1997-01-03 fl   Up and running.
# 1997-08-23 fl   Added load hack
# 2001-04-16 fl   Fixed randint shadow bug in random()
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#

import array

from . import GimpGradientFile, GimpPaletteFile, ImageColor, PaletteFile


class ImagePalette:
    """
    Color palette for palette mapped images

    :param mode: The mode to use for the palette. See:
        :ref:`concept-modes`. Defaults to "RGB"
    :param palette: An optional palette. If given, it must be a bytearray,
        an array or a list of ints between 0-255. The list must consist of
        all channels for one color followed by the next color (e.g. RGBRGBRGB).
        Defaults to an empty palette.
    """

    def __init__(self, mode="RGB", palette=None):
        self.mode = mode
        self.rawmode = None  # if set, palette contains raw data
        self.palette = palette or bytearray()
        self.dirty = None

    @property
    def palette(self):
        return self._palette

    @palette.setter
    def palette(self, palette):
        self._colors = None
        self._palette = palette

    @property
    def colors(self):
        if self._colors is None:
            mode_len = len(self.mode)
            self._colors = {}
            for i in range(0, len(self.palette), mode_len):
                color = tuple(self.palette[i : i + mode_len])
                if color in self._colors:
                    continue
                self._colors[color] = i // mode_len
        return self._colors

    @colors.setter
    def colors(self, colors):
        self._colors = colors

    def copy(self):
        new = ImagePalette()

        new.mode = self.mode
        new.rawmode = self.rawmode
        if self.palette is not None:
            new.palette = self.palette[:]
        new.dirty = self.dirty

        return new

    def getdata(self):
        """
        Get palette contents in format suitable for the low-level
        ``im.putpalette`` primitive.

        .. warning:: This method is experimental.
        """
        if self.rawmode:
            return self.rawmode, self.palette
        return self.mode, self.tobytes()

    def tobytes(self):
        """Convert palette to bytes.

        .. warning:: This method is experimental.
        """
        if self.rawmode:
            msg = "palette contains raw palette data"
            raise ValueError(msg)
        if isinstance(self.palette, bytes):
            return self.palette
        arr = array.array("B", self.palette)
        return arr.tobytes()

    # Declare tostring as an alias for tobytes
    tostring = tobytes

    def getcolor(self, color, image=None):
        """Given an rgb tuple, allocate palette entry.

        .. warning:: This method is experimental.
        """
        if self.rawmode:
            msg = "palette contains raw palette data"
            raise ValueError(msg)
        if isinstance(color, tuple):
            if self.mode == "RGB":
                if len(color) == 4:
                    if color[3] != 255:
                        msg = "cannot add non-opaque RGBA color to RGB palette"
                        raise ValueError(msg)
                    color = color[:3]
            elif self.mode == "RGBA":
                if len(color) == 3:
                    color += (255,)
            try:
                return self.colors[color]
            except KeyError as e:
                # allocate new color slot
                if not isinstance(self.palette, bytearray):
                    self._palette = bytearray(self.palette)
                index = len(self.palette) // 3
                special_colors = ()
                if image:
                    special_colors = (
                        image.info.get("background"),
                        image.info.get("transparency"),
                    )
                while index in special_colors:
                    index += 1
                if index >= 256:
                    if image:
                        # Search for an unused index
                        for i, count in reversed(list(enumerate(image.histogram()))):
                            if count == 0 and i not in special_colors:
                                index = i
                                break
                    if index >= 256:
                        msg = "cannot allocate more than 256 colors"
                        raise ValueError(msg) from e
                self.colors[color] = index
                if index * 3 < len(self.palette):
                    self._palette = (
                        self.palette[: index * 3]
                        + bytes(color)
                        + self.palette[index * 3 + 3 :]
                    )
                else:
                    self._palette += bytes(color)
                self.dirty = 1
                return index
        else:
            msg = f"unknown color specifier: {repr(color)}"
            raise ValueError(msg)

    def save(self, fp):
        """Save palette to text file.

        .. warning:: This method is experimental.
        """
        if self.rawmode:
            msg = "palette contains raw palette data"
            raise ValueError(msg)
        if isinstance(fp, str):
            fp = open(fp, "w")
        fp.write("# Palette\n")
        fp.write(f"# Mode: {self.mode}\n")
        for i in range(256):
            fp.write(f"{i}")
            for j in range(i * len(self.mode), (i + 1) * len(self.mode)):
                try:
                    fp.write(f" {self.palette[j]}")
                except IndexError:
                    fp.write(" 0")
            fp.write("\n")
        fp.close()


# --------------------------------------------------------------------
# Internal


def raw(rawmode, data):
    palette = ImagePalette()
    palette.rawmode = rawmode
    palette.palette = data
    palette.dirty = 1
    return palette


# --------------------------------------------------------------------
# Factories


def make_linear_lut(black, white):
    lut = []
    if black == 0:
        for i in range(256):
            lut.append(white * i // 255)
    else:
        raise NotImplementedError  # FIXME
    return lut


def make_gamma_lut(exp):
    lut = []
    for i in range(256):
        lut.append(int(((i / 255.0) ** exp) * 255.0 + 0.5))
    return lut


def negative(mode="RGB"):
    palette = list(range(256 * len(mode)))
    palette.reverse()
    return ImagePalette(mode, [i // len(mode) for i in palette])


def random(mode="RGB"):
    from random import randint

    palette = []
    for i in range(256 * len(mode)):
        palette.append(randint(0, 255))
    return ImagePalette(mode, palette)


def sepia(white="#fff0c0"):
    bands = [make_linear_lut(0, band) for band in ImageColor.getrgb(white)]
    return ImagePalette("RGB", [bands[i % 3][i // 3] for i in range(256 * 3)])


def wedge(mode="RGB"):
    palette = list(range(256 * len(mode)))
    return ImagePalette(mode, [i // len(mode) for i in palette])


def load(filename):
    # FIXME: supports GIMP gradients only

    with open(filename, "rb") as fp:
        for paletteHandler in [
            GimpPaletteFile.GimpPaletteFile,
            GimpGradientFile.GimpGradientFile,
            PaletteFile.PaletteFile,
        ]:
            try:
                fp.seek(0)
                lut = paletteHandler(fp).getpalette()
                if lut:
                    break
            except (SyntaxError, ValueError):
                # import traceback
                # traceback.print_exc()
                pass
        else:
            msg = "cannot load palette"
            raise OSError(msg)

    return lut  # data, rawmode

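A short sketch of this module in use through Pillow's public API, matching the `getcolor` allocation logic above (assumes Pillow is installed):

```python
from PIL import Image, ImagePalette

pal = ImagePalette.ImagePalette("RGB")   # empty RGB palette
red = pal.getcolor((255, 0, 0))          # allocates slot 0
green = pal.getcolor((0, 255, 0))        # allocates slot 1
img = Image.new("P", (8, 8), red)        # paletted image filled with the red index
img.putpalette(pal.palette)              # attach the palette data to the image
```
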