Commit 7faf387
Parent(s): 8319926
Update parquet files (step 91 of 397)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Acrobat 8 Professional The Ultimate PDF Editor and Converter.md +0 -32
- spaces/1gistliPinn/ChatGPT4/Examples/Anyrail License Key Free.md +0 -26
- spaces/1gistliPinn/ChatGPT4/Examples/Download Tumblebugs 2 For Free Full Version.md +0 -24
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download FIFA Mobile APK for iOS and Play with the Worlds Best Football Stars.md +0 -117
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Game Booster and Optimize Your PC Performance for Gaming.md +0 -135
- spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 A Fun and Colorful Racing Game with MOD APK (Everything Unlocked).md +0 -78
- spaces/1phancelerku/anime-remove-background/Download Instagram Stories with One Click - StoryDownloader.md +0 -89
- spaces/1phancelerku/anime-remove-background/Download Music Cloud The Ultimate Guide to Stream and Save Songs from Anywhere.md +0 -103
- spaces/1phancelerku/anime-remove-background/Easy Ways to Download WhatsApp Business on Your Laptop and Stay Connected with Your Customers.md +0 -127
- spaces/1phancelerku/anime-remove-background/Enjoy Temple Run with Mod Features - Free Download for Android Devices.md +0 -141
- spaces/1yukikaze/img-to-music/README.md +0 -13
- spaces/232labs/VToonify/vtoonify/model/raft/core/utils/utils.py +0 -82
- spaces/52Hz/HWMNet_lowlight_enhancement/app.py +0 -39
- spaces/801artistry/RVC801/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +0 -97
- spaces/AI4PD/hexviz/hexviz/pages/3_📄Documentation.py +0 -89
- spaces/ASJMO/freegpt/client/js/chat.js +0 -508
- spaces/Abhilashvj/haystack_QA/app.py +0 -341
- spaces/AchyuthGamer/MagicPrompt-Stable-Diffusion/style.css +0 -84
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/__init__.py +0 -100
- spaces/AdVisual/MaskCut/config.py +0 -60
- spaces/Afnaan/chatbots/app.py +0 -43
- spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/decision_maker/base.py +0 -64
- spaces/AhmedBadrDev/stomach/app.py +0 -36
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/paint_by_example/test_paint_by_example.py +0 -214
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py +0 -598
- spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py +0 -5
- spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py +0 -9
- spaces/Andy1621/uniformer_image_detection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py +0 -56
- spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/util/html.py +0 -86
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_module.py +0 -206
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/checkpoint.py +0 -707
- spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/F0Predictor.py +0 -16
- spaces/ArtGAN/Diffusion-API/diffusion_webui/diffusion_models/inpaint_app.py +0 -149
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/typing_extensions.py +0 -2312
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/results.py +0 -760
- spaces/AzizR/FaceRecognitionGradio/app.py +0 -228
- spaces/AzumaSeren100/XuanShen-Bert-VITS2/attentions.py +0 -344
- spaces/BAAI/AltDiffusion/ui_functions.py +0 -240
- spaces/BFH/BKMotionsAI/README.md +0 -13
- spaces/BMukhtar/BookRecognitionKz/app.py +0 -63
- spaces/Banbri/zcvzcv/src/app/engine/caption.ts +0 -54
- spaces/Basil2k4/botbasil203/src/startup/version_sticker.sh +0 -39
- spaces/Benson/text-generation/Examples/Candy Crush Saga 1.242.1.1 Mod Apk.md +0 -93
- spaces/Benson/text-generation/Examples/Carx Street Apk Ne Zaman kacak.md +0 -69
- spaces/Benson/text-generation/Examples/Coche Simulador 2 Descargar Ios.md +0 -114
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/bucket.py +0 -115
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py +0 -519
- spaces/CVPR/LIVE/thrust/thrust/iterator/detail/tuple_of_iterator_references.h +0 -263
- spaces/CVPR/LIVE/thrust/thrust/per_device_resource.h +0 -104
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/config.h +0 -80
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Acrobat 8 Professional The Ultimate PDF Editor and Converter.md
DELETED
@@ -1,32 +0,0 @@
-<br />
-<h1>Adobe Acrobat 8 Professional: A Review of Its Features and Benefits</h1>
-<p>Adobe Acrobat 8 Professional is a software that allows you to create, edit, convert, and protect PDF documents. It was released by Adobe Systems in 2006 as part of the Acrobat 8 family and was also included with Adobe Creative Suite 2.3 and 3. In this article, we will review some of the features and benefits of Adobe Acrobat 8 Professional and how it can help you with your PDF needs.</p>
-<h2>What is Adobe Acrobat 8 Professional?</h2>
-<p>Adobe Acrobat 8 Professional is the full professional version of the Acrobat PDF editor. It enables you to do more than just view and print PDF files. You can also create PDF files from various sources, such as Microsoft Office documents, web pages, scanned images, and more. You can also edit PDF files by adding or deleting text, images, links, bookmarks, comments, and annotations. You can also convert PDF files to other formats, such as Word, Excel, PowerPoint, HTML, and more.</p>
-<h2>adobe acrobat 8 professional</h2><br /><p><b><b>Download File</b> ––– <a href="https://byltly.com/2uKwEg">https://byltly.com/2uKwEg</a></b></p><br /><br />
-<p>Adobe Acrobat 8 Professional also allows you to protect your PDF files with passwords, encryption, digital signatures, and redaction. You can also control the access and permissions of your PDF files by restricting printing, copying, editing, or extracting content. You can also apply watermarks, stamps, headers, and footers to your PDF files.</p>
-<p>Another feature of Adobe Acrobat 8 Professional is the ability to collect e-signatures and sign documents electronically. You can use the Adobe Sign service to send and track documents for signature online. You can also use the Acrobat Self-Sign tool to sign documents yourself with a digital ID or a handwritten signature.</p>
-<h2>What are the Benefits of Adobe Acrobat 8 Professional?</h2>
-<p>Some of the benefits of using Adobe Acrobat 8 Professional are:</p>
-<ul>
-<li>You can create professional-looking PDF documents that are compatible with any device or platform.</li>
-<li>You can edit and modify your PDF documents without losing the original layout or quality.</li>
-<li>You can convert your PDF documents to other formats and vice versa with ease and accuracy.</li>
-<li>You can protect your PDF documents from unauthorized access or modification with various security options.</li>
-<li>You can collect e-signatures and sign documents electronically without printing or scanning.</li>
-</ul>
-<h2>How to Download and Install Adobe Acrobat 8 Professional?</h2>
-<p>To download and install Adobe Acrobat 8 Professional on your computer, you need to follow these steps:</p>
-<ol>
-<li>Go to the Adobe website at https://www.adobe.com/</li>
-<li>Click on the Products tab and select Acrobat from the list.</li>
-<li>Click on the Download button and choose Acrobat 8 Professional from the drop-down menu.</li>
-<li>Enter your Adobe ID and password or create a new account if you don't have one.</li>
-<li>Follow the instructions on the screen to download and install Adobe Acrobat 8 Professional on your computer.</li>
-<li>After installing Adobe Acrobat 8 Professional, you can launch it by clicking on the icon on your desktop or in your Start menu.</li>
-</ol>
-<h2>Conclusion</h2>
-<p>In conclusion, Adobe Acrobat 8 Professional is a powerful and versatile software that can help you create, edit, convert, and protect PDF documents. It also allows you to collect e-signatures and sign documents electronically. If you want to try Adobe Acrobat 8 Professional for free for 30 days before buying it, you can download it from the Adobe website.</p>
-<p></p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Anyrail License Key Free.md
DELETED
@@ -1,26 +0,0 @@
-<h2>anyrail license key free</h2><br /><p><b><b>DOWNLOAD</b> ✺✺✺ <a href="https://imgfil.com/2uxZQ3">https://imgfil.com/2uxZQ3</a></b></p><br /><br />
-<br />
-Licence keys available for this version.
-
-“I bought the AnyRail software to quickly make my own layouts. If you have a PCB manual laying machine this program is almost perfect. You can spend the time writing each PCB or spend all your time designing. Also the calibration of the machine is excellent, but just in case check your setup carefully for broken connectors and labels. All in all a great program.”
-
-“One of the best cross-platform tools for building PCBs. A must have if you’re a professional or hobbyist PCB designer.”
-
-“AnyRail is a great PCB design tool. I used to use hand-drawn PCBs, but AnyRail PCBs look way better. The only problem I’ve found so far is that it can’t run on both a Mac and a Windows machine. I use Mac hardware, but I would have to be OK with Windows for Linux.”
-
-“AnyRail is a great tool to have when designing for printed circuit boards. I also like the size of the free license key for AnyRail which gives me the ability to use any size layout I want. It is a great tool that I would recommend to all of my friends.”
-
-“AnyRail is the best easy-to-use cross-platform PCB design program on the market today. It lets you draw your PCBs in a simple, quick and intuitive way.”
-
-“AnyRail is an easy to use tool that let’s you get on with designing your board.”
-
-“AnyRail is a great tool. Easy to use and offers lots of options. I’ve used it for designing and building 2,000+ units of a microcontroller. The variety of components and the ability to load them from a file makes it very easy to use.”
-
-“I use AnyRail as a PCB design tool and I love it. It is very easy to use and does not require any expertise to get the job done. It takes a fraction of the time and space of a full featured software like Altium Designer.”
-
-“I have been looking for a PCB layout program that can be used on both Windows and Mac platforms. AnyRail has proven to be a good solution. I am happy with the experience and feature-set.”
-
-“AnyRail is a really good tool for a beginner. 4fefd39f24<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download Tumblebugs 2 For Free Full Version.md
DELETED
@@ -1,24 +0,0 @@
-<h2>Download Tumblebugs 2 For Free Full Version</h2><br /><p><b><b>DOWNLOAD</b> ⚡ <a href="https://imgfil.com/2uy1bP">https://imgfil.com/2uy1bP</a></b></p><br /><br />
-<br />
-jar? Get more of the latest web games and addictive .jar games from top developers!
-
-Try this one today!
-
-LimeApps is the source of the "Racing Flash Games" you are looking for! Here you'll find the latest flash racing games which are free to download. LimeApps also features many older flash games that we have archived. Games like .zip and .rar are also available, so we hope you'll enjoy your stay. Just as a little fun fact, did you know that .zip and .rar are two of the most popular file-types on the web? If you like LimeApps, why not tell a friend about it!Citing a need to “evaluate the advantages” of an NFL strike, Commissioner Roger Goodell has given the league the power to lock out players.
-
-“It is important for us to evaluate the advantages, if any, of a work stoppage of the National Football League and the potential impact on the 2014 season,” Goodell said in a letter to the owners. “It is not in the best interests of our players, our clubs, or our fans for the season to be delayed or played without a full complement of players.”
-
-Goodell added that there was “no one-size-fits-all” answer to how to play football with a 16-game season.
-
-“As much as possible, we should ensure that the game is played for fans in markets that have teams playing in it, and should be safe and competitive for the players,” he wrote.
-
-Goodell indicated that the owners and players are still in talks for a new collective-bargaining agreement, which was set to expire on March 4.
-
-The owners and the NFL Players Association are still in negotiations, but the union is considering locking out the league. The owners are looking to get an early deal signed before a December 15 deadline, according to a source with knowledge of the talks.
-
-“We are always trying to reach agreements that are fair and reasonable for the players,” DeMaurice Smith, executive director of the NFLPA, said in a statement. “But we won’t sign a deal we don’t think is in the best interests of our players. We’re still working hard and committed to reaching a deal.”
-
-The NFL has claimed that the lockout 4fefd39f24<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download FIFA Mobile APK for iOS and Play with the Worlds Best Football Stars.md
DELETED
@@ -1,117 +0,0 @@
-
-<h1>FIFA Mobile APK for iOS: How to Download and Play the Ultimate Soccer Game</h1>
-<p>If you are a fan of soccer, you probably have heard of FIFA Mobile, the official mobile game of the FIFA World Cup 2022™. FIFA Mobile is a free-to-play soccer game that lets you build your dream team, compete in various modes, and experience the thrill of the world's most popular sport. But did you know that you can also play FIFA Mobile on your iOS device? In this article, we will show you how to download and install FIFA Mobile APK for iOS, how to play the game, and some tips and tricks to improve your skills. We will also tell you why playing FIFA Mobile on iOS is a great idea for any soccer lover.</p>
-<h2>fifa mobile apk for ios</h2><br /><p><b><b>Download</b> ☆☆☆ <a href="https://urlin.us/2uT0TH">https://urlin.us/2uT0TH</a></b></p><br /><br />
-<h2>How to download FIFA Mobile APK for iOS</h2>
-<p>FIFA Mobile is available on the App Store for iOS devices, but you can also download the APK file from other sources if you prefer. An APK file is an Android application package that contains all the files and data needed to run an app on an Android device. However, you can also use an APK file to run an Android app on an iOS device with the help of an emulator. An emulator is a software that mimics the functions of another device or platform. In this case, you need an Android emulator for iOS that can run APK files.</p>
-<p>There are many Android emulators for iOS that you can choose from, such as iAndroid, Dalvik, or Appetize.io. However, one of the most popular and reliable ones is Cydia Impactor. Cydia Impactor is a tool that allows you to install any APK file on your iOS device without jailbreaking it. To use Cydia Impactor, you need to have a computer (Windows, Mac, or Linux), a USB cable, and an iTunes account. Here are the steps to download FIFA Mobile APK for iOS using Cydia Impactor:</p>
-<ol>
-<li>Download Cydia Impactor from <a href="(^1^)">https://cydiaimpactor.com/</a> and install it on your computer.</li>
-<li>Download FIFA Mobile APK from <a href="(^2^)">https://apps.apple.com/us/app/fifa-soccer/id1094930513</a> or any other trusted source and save it on your computer.</li>
-<li>Connect your iOS device to your computer using a USB cable and launch Cydia Impactor.</li>
-<li>Drag and drop the FIFA Mobile APK file onto the Cydia Impactor window.</li>
-<li>Enter your iTunes email and password when prompted. This is needed to generate a certificate for the app.</li>
-<li>Wait for Cydia Impactor to install the app on your device. You will see a progress bar and a message saying "Complete" when it's done.</li>
-</ol>
-<h2>How to install FIFA Mobile APK for iOS</h2>
-<p>After downloading FIFA Mobile APK for iOS using Cydia Impactor, you need to do one more thing before you can play the game. You need to trust the app on your device. This is because Cydia Impactor uses a developer certificate that is not recognized by Apple. To trust the app, follow these steps:</p>
-<p>fifa mobile ios download free<br />
-fifa mobile 23 apk for iphone<br />
-how to install fifa mobile apk on ios<br />
-fifa mobile world cup 2022 ios<br />
-fifa mobile soccer game for ios<br />
-fifa mobile hack apk ios<br />
-fifa mobile mod apk for ios<br />
-fifa mobile ultimate team ios<br />
-fifa mobile 22 apk download for ios<br />
-fifa mobile apk ios no jailbreak<br />
-fifa mobile cheats apk ios<br />
-fifa mobile coins generator apk ios<br />
-fifa mobile offline apk for ios<br />
-fifa mobile latest version apk for ios<br />
-fifa mobile beta apk ios<br />
-fifa mobile 21 apk for ios<br />
-fifa mobile legends apk ios<br />
-fifa mobile icons apk ios<br />
-fifa mobile manager mode apk ios<br />
-fifa mobile champions league apk ios<br />
-fifa mobile update apk for ios<br />
-fifa mobile cracked apk ios<br />
-fifa mobile full game apk for ios<br />
-fifa mobile online apk for ios<br />
-fifa mobile gameplay apk for ios<br />
-fifa mobile best players apk for ios<br />
-fifa mobile tips and tricks apk for ios<br />
-fifa mobile skills tutorial apk for ios<br />
-fifa mobile graphics mod apk for ios<br />
-fifa mobile real madrid apk for ios<br />
-fifa mobile barcelona apk for ios<br />
-fifa mobile liverpool apk for ios<br />
-fifa mobile juventus apk for ios<br />
-fifa mobile psg apk for ios<br />
-fifa mobile manchester city apk for ios<br />
-fifa mobile chelsea apk for ios<br />
-fifa mobile bayern munich apk for ios<br />
-fifa mobile dortmund apk for ios<br />
-fifa mobile atletico madrid apk for ios<br />
-fifa mobile inter milan apk for ios<br />
-fifa mobile ajax apk for ios<br />
-fifa mobile porto apk for ios<br />
-fifa mobile leicester city apk for ios<br />
-fifa mobile sevilla apk for ios<br />
-fifa mobile napoli apk for ios<br />
-fifa mobile lyon apk for ios<br />
-fifa mobile rb leipzig apk for ios<br />
-fifa mobile villarreal apk for ios<br />
-fifa mobile lazio apk for ios</p>
-<ol>
-<li>Go to Settings > General > Device Management on your iOS device.</li>
-<li>Find the profile that matches your iTunes email and tap on it.</li>
-<li>Tap on "Trust" and confirm.</li>
-</ol>
-<p>Now you can launch FIFA Mobile from your home screen and enjoy the game!</p>
-<h2>How to play FIFA Mobile on iOS</h2>
-<p>FIFA Mobile is a soccer game that lets you create your ultimate team, compete in various modes, and experience the excitement of the FIFA World Cup 2022™. You can choose from over 15,000 players from over 600 teams, including Real Madrid , Barcelona, Manchester United, and more. You can also customize your team's kits, logos, and formations. Here are some of the modes you can play in FIFA Mobile:</p>
-<ul>
-<li><b>World Cup Mode:</b> This is the main mode of the game, where you can participate in the FIFA World Cup 2022™. You can choose your favorite national team and play through the qualifiers, group stage, knockout stage, and the final. You can also play against other players from around the world in online matches.</li>
-<li><b>Season Mode:</b> This is where you can play a full season with your team in one of the top leagues, such as the Premier League, La Liga, Bundesliga, Serie A, and more. You can compete for the league title, the domestic cup, and the continental cup.</li>
-<li><b>Events Mode:</b> This is where you can play special events that are based on real-life soccer events, such as the UEFA Champions League, the UEFA Europa League, the Copa America, the African Cup of Nations, and more. You can earn rewards and exclusive players by completing challenges and objectives.</li>
-<li><b>Versus Mode:</b> This is where you can challenge other players in head-to-head matches. You can either play in real-time or in turn-based mode. You can also join a league and play with your friends and other members.</li>
-<li><b>Training Mode:</b> This is where you can improve your skills and learn new moves. You can practice dribbling, passing, shooting, defending, and more. You can also upgrade your players' attributes and abilities by using training points.</li>
-</ul>
-<p>To play FIFA Mobile on iOS, you need to have a stable internet connection and at least 1 GB of free space on your device. You also need to have an EA account to access some of the features and modes of the game.</p>
-<h2>Tips and tricks for FIFA Mobile on iOS</h2>
-<p>FIFA Mobile is a fun and addictive game that can keep you entertained for hours. However, it can also be challenging and competitive at times. To help you become a better player and enjoy the game more, here are some tips and tricks for FIFA Mobile on iOS:</p>
-<ul>
-<li><b>Use the right controls:</b> FIFA Mobile offers two types of controls: gesture-based and button-based. Gesture-based controls allow you to swipe and tap on the screen to perform actions, while button-based controls give you virtual buttons to control your players. You can choose whichever one suits your preference and style. However, we recommend using gesture-based controls for more accuracy and flexibility.</li>
-<li><b>Master the skill moves:</b> Skill moves are special moves that you can use to beat your opponents and create chances. They include tricks such as roulette, rainbow flick, heel-to-heel flick, stepover, ball roll, and more. To perform a skill move, you need to swipe on the skill button on the bottom right corner of the screen. The type of skill move depends on the direction and length of your swipe. You can also customize your skill moves by going to Settings > Controls > Customize Controls.</li>
-<li><b>Build a balanced team:</b> FIFA Mobile allows you to build your dream team with players from different leagues, nations, and ratings. However, you need to pay attention to your team's chemistry and formation. Chemistry is a measure of how well your players work together on the pitch. It is affected by factors such as league, nation, club, position, and link color. The higher your chemistry, the better your team's performance. Formation is the way you arrange your players on the pitch. It is determined by factors such as style, strategy, and preference. The best formation for you depends on your team's strengths and weaknesses.</li>
-<li><b>Play smart and tactical:</b> FIFA Mobile is not just about scoring goals; it is also about preventing them. You need to play smart and tactical to win matches. You need to know when to attack and when to defend; when to pass and when to shoot; when to press and when to drop back; when to switch players and when to stick with them; when to use skill moves and when to avoid them; when to use power-ups and when to save them; etc. You also need to adapt to different situations and opponents.</li>
-<li><b>Earn coins and gems:</b> Coins and gems are the main currencies in FIFA Mobile. You need them to buy players, packs, items, upgrades, etc. You can earn coins by playing matches, completing objectives , and participating in events. You can earn gems by leveling up, watching ads, and completing achievements. You can also buy coins and gems with real money if you want to. However, we advise you to spend your coins and gems wisely and avoid wasting them on unnecessary things.</li>
-</ul>
-<h2>Benefits of playing FIFA Mobile on iOS</h2>
-<p>Playing FIFA Mobile on iOS has many benefits that can enhance your gaming experience and enjoyment. Here are some of them:</p>
-<ul>
-<li><b>High-quality graphics and sound:</b> FIFA Mobile on iOS has stunning graphics and sound that make the game more realistic and immersive. You can see the details of the players, the stadiums, the fans, the weather, and more. You can also hear the commentary, the crowd noise, the ball sounds, and more. You can adjust the graphics and sound settings to suit your device and preference.</li>
-<li><b>Smooth and responsive gameplay:</b> FIFA Mobile on iOS has smooth and responsive gameplay that makes the game more fun and easy to play. You can control your players with simple gestures and buttons, and see them react to your commands instantly. You can also enjoy fast loading times and minimal lagging.</li>
-<li><b>Convenient and portable:</b> FIFA Mobile on iOS is convenient and portable, as you can play the game anytime and anywhere with your iOS device. You don't need a console, a TV, a controller, or a disc to play the game. You just need your device, an internet connection, and some battery power. You can also play the game offline if you want to.</li>
-<li><b>Free and updated:</b> FIFA Mobile on iOS is free to download and play, as you don't need to pay anything to enjoy the game. You can also get regular updates that add new features, modes, players, events, etc. to the game. You can also get support from EA if you encounter any issues or problems with the game.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>FIFA Mobile is a great soccer game that you can play on your iOS device with the help of an APK file and an emulator. You can download and install FIFA Mobile APK for iOS using Cydia Impactor, a tool that allows you to install any APK file on your iOS device without jailbreaking it. You can then play FIFA Mobile on iOS and enjoy its various modes, features, and benefits. You can also improve your skills and performance by following some tips and tricks for FIFA Mobile on iOS.</p>
-<p>If you love soccer and want to experience the FIFA World Cup 2022™ on your iOS device, you should definitely try FIFA Mobile APK for iOS. It is a fun, addictive, and rewarding game that will keep you entertained for hours. Download it now and start playing!</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about FIFA Mobile APK for iOS:</p>
-<ol>
-<li><b>Is FIFA Mobile APK for iOS safe?</b></li>
-<p>FIFA Mobile APK for iOS is safe as long as you download it from a trusted source and use a reliable emulator to run it. However, you should always be careful when downloading any file from the internet, as there may be some risks involved. You should also scan your device for viruses or malware before and after installing the app.</p>
-<li><b>Is FIFA Mobile APK for iOS legal?</b></li>
-<p>FIFA Mobile APK for iOS is legal as long as you don't use it for any illegal or unethical purposes. However, you should be aware that downloading an APK file from a third-party source may violate some terms and conditions of EA or Apple. You should also respect the intellectual property rights of EA and other parties involved in the game.</p>
-<li><b>Is FIFA Mobile APK for iOS compatible with my device?</b></li>
-<p>FIFA Mobile APK for iOS is compatible with most iOS devices that run on iOS 9 or later. However, some devices may not support some features or modes of the game due to their specifications or limitations. You should also make sure that your device has enough storage space and battery power to run the game smoothly.</p>
-<li><b>How do I update FIFA Mobile APK for iOS?</b></li>
-<p>To update FIFA Mobile APK for iOS, you need to download the latest version of the APK file from a trusted source and install it on your device using Cydia Impactor. You should also delete the previous version of the app before installing the new one to avoid any conflicts or errors.</p>
-<li><b>How do I uninstall FIFA Mobile APK for iOS?</b></li>
-<p>To uninstall FIFA Mobile APK for iOS, you need to go to Settings > General > Device Management on your iOS device and find the profile that matches your iTunes email. Then, tap on it and tap on "Delete App". You should also delete the FIFA Mobile APK file from your computer and the Cydia Impactor tool from your computer as well.</p> 197e85843d<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Game Booster and Optimize Your PC Performance for Gaming.md
DELETED
@@ -1,135 +0,0 @@
-<br />
-<h1>How to Download Games on Your PC, Android, or iOS Device</h1>
-<p>Do you love playing games on your computer or mobile device? Do you want to have access to thousands of games without spending a fortune? Do you want to enjoy your favorite games anytime and anywhere? If you answered yes to any of these questions, then downloading games is the perfect option for you.</p>
-<p>Downloading games is fun and convenient. You can choose from a wide variety of genres, styles, and themes. You can play offline or online with your friends. You can save space and money by storing your games digitally. You can also update your games easily and get new features and content.</p>
-<h2>download game</h2><br /><p><b><b>Download</b> ✦✦✦ <a href="https://urlin.us/2uSZHZ">https://urlin.us/2uSZHZ</a></b></p><br /><br />
-<p>However, downloading games can also be challenging. You need to have a reliable internet connection and enough storage space. You need to be careful about malware and viruses. You need to be aware of the compatibility and requirements of your device. You also need to know how to find and download games from different sources.</p>
-<p>In this article, we will show you how to download games on your PC, Android, or iOS device. We will guide you through the steps of choosing a platform or a website, creating an account and installing the software, browsing and purchasing games, and downloading and installing games. We will also provide some tips and tricks for downloading games.</p>
-<h2>How to Download Games on PC</h2>
-<p>If you want to play games on your PC, you have many options. You can download games from various platforms or websites that offer a large selection of games for different genres and preferences. Some of the most popular platforms or websites are:</p>
-<table>
-<tr><th>Platform/Website</th><th>Description</th></tr>
-<tr><td>Steam</td><td>The largest digital distribution service for PC gaming. It offers over 50,000 games from indie developers to AAA publishers. It also features social networking, cloud saving, achievements, chat, reviews, forums, etc.</td></tr>
-<tr><td>Epic Games Store</td><td>A digital distribution service that competes with Steam. It offers exclusive titles from Epic Games such as Fortnite and Unreal Engine. It also offers free games every week.</td></tr>
-<tr><td>Origin</td><td>A digital distribution service that focuses on EA titles such as FIFA, The Sims, Battlefield, etc. It also features cloud saving, achievements, chat, etc.</td></tr>
-<tr><td>Google Play Games</td><td>A digital distribution service that offers games for Chrome OS devices. It also allows you to play Android games on your PC using the Google Play Games app.</td></tr>
-<tr><td>EA</td><td>A digital distribution service that offers games from EA and its partners. It also features EA Play, a subscription service that gives you access to a library of games and exclusive benefits.</td></tr>
-</table>
-<p>To download games from these platforms or websites, you need to follow these steps:</p>
-<p>download game of thrones season 8<br />
-download game guardian apk<br />
-download game pc offline<br />
-download game ppsspp iso<br />
-download game ps2 for android<br />
-download game booster for pc<br />
-download game naruto ultimate ninja storm 4<br />
-download game gta san andreas mod apk<br />
-download game pubg mobile lite<br />
-download game free fire max<br />
-download game among us pc<br />
-download game pokemon go<br />
-download game clash of clans hack<br />
-download game subway surfers mod apk<br />
-download game call of duty mobile<br />
-download game minecraft pe free<br />
-download game roblox studio<br />
-download game asphalt 9 legends<br />
-download game candy crush saga for pc<br />
-download game fortnite battle royale<br />
-download game hay day mod apk<br />
-download game temple run 2<br />
-download game zombie tsunami<br />
-download game angry birds 2<br />
-download game plants vs zombies 2<br />
-download game fifa 21 mobile<br />
-download game dragon ball z dokkan battle<br />
-download game mortal kombat x<br />
-download game need for speed most wanted<br />
-download game shadow fight 3 mod apk<br />
-download game hill climb racing 2<br />
-download game cooking fever mod apk<br />
-download game farm frenzy 3 full version free<br />
-download game euro truck simulator 2 full crack<br />
-download game the sims 4 free for pc full version no survey no password no virus no ads no human verification no cd key no activation code no serial number no license key no registration key no product key no steam key no origin key no uplay key no epic games key no rockstar games key no gog galaxy key no windows store key no xbox live gold key no playstation plus key no nintendo switch online key just direct link google drive mega mediafire zippyshare openload dropbox onedrive box cloudmailru sendspace uptobox userscloud filefactory file-upload turbobit uploaded rapidgator nitroflare megaup anonfiles bayfiles dlfree fireload hexupload mixdrop uploadhaven uploadship veryfiles vidoza vidlox streamtape streamsb doodstream clipwatching upstream jetload netu vidcloud videobin vidtodo vidlox vidfast vidup vidia vidhd supervideo fembed gounlimited hlsplay okstream oogly onlystream prostream streamtape streamz uptostream vevio vidlox vidoza vidtodo vivo watchvideo xstreamcdn xstreamtape youwatch youtube facebook twitter instagram tiktok reddit quora pinterest tumblr linkedin snapchat whatsapp telegram wechat line skype zoom discord slack medium wordpress blogger wix squarespace shopify magento bigcommerce woocommerce prestashop opencart drupal joomla moodle kajabi teachable udemy coursera edx skillshare lynda pluralsight udacity codecademy khan academy alison duolingo memrise busuu rosetta stone babbel pimsleur michel thomas fluentu lingoda italki verbling preply cambly lingodeer drops hellochinese chineseskill lingoace dominochinese chineseclass101 chinesepod yoyochinese fluentinmandarin mandarincorner everydaychinese learnchineseez learnchineseabc learnchinesewithemma learnchinesewithlitao learnchinesewithmike learnchinesewithyue learnmandarinwithben learnmandarinwithterry learnmandarinwithxiaoman mandarinmadeez mandarinspot mandarintones mandarintutoring mandarinx mindsnackschinese ninchanese popupchinese skritterchinese speakmandarin talktomeinchinese yabla chinese yeschinese zizzle chinese (I'm just kidding, please don't use this one) 😂</p>
-<h3>Choose a platform or a website</h3>
-<p>The first step is to decide which platform or website you want to use to download games. You can compare the features, prices, reviews, and availability of the games you are interested in. You can also check the compatibility and requirements of your PC. You can visit the official websites of the platforms or use a search engine to find them.</p>
-<h3>Create an account and install the software</h3>
-<p>The next step is to create an account and install the software on your PC. You need to provide your email address, username, password, and other details. You also need to verify your email and agree to the terms and conditions. You can then download and run the installer of the platform or website. You need to follow the instructions and choose the location where you want to install the software.</p>
-<h3>Browse and purchase games</h3>
-<p>The final step is to browse and purchase games on the platform or website. You can use the search bar, filters, categories, recommendations, and other features to find the games you want. You can also read the descriptions, reviews, ratings, screenshots, videos, and other information about the games. You can then add the games to your cart and check out. You need to provide your payment method and confirm your purchase. You can then download and install the games on your PC.</p>
-<h2>How to Download Games on Android</h2>
-<p>If you want to play games on your Android device, you have many options as well. You can download games from various sources or stores that offer a large selection of games for different genres and preferences. Some of the most popular sources or stores are:</p>
-<table>
-<tr><th>Source/Store</th><th>Description</th></tr>
-<tr><td>Google Play Store</td><td>The official app store for Android devices. It offers over 3 million apps and games from various developers and publishers. It also features ratings, reviews, recommendations, etc.</td></tr>
-<tr><td>Amazon Appstore</td><td>An alternative app store for Android devices. It offers over 600,000 apps and games from various developers and publishers. It also features free apps of the day, coins, etc.</td></tr>
-<tr><td>APKPure</td><td>A third-party app store for Android devices. It offers APK files of apps and games that are not available on Google Play Store or other app stores. It also features updates, mods, etc.</td></tr>
-<tr><td>APKMirror</td><td>A third-party app store for Android devices. It offers APK files of apps and games that are available on Google Play Store or other app stores. It also features beta versions, older versions, etc.</td></tr>
-</table>
-<p>To download games from these sources or stores, you need to follow these steps:</p>
-<h3>Choose a source or a store</h3>
-<p>The first step is to decide which source or store you want to use to download games. You can compare the features, prices, reviews, and availability of the games you are interested in. You can also check the compatibility and requirements of your device. You can visit the official websites of the sources or use a search engine to find them.</p>
-<h3>Enable unknown sources and install the app</h3>
-<p>The next step is to enable unknown sources and install the app on your device. You need to change your security settings to allow unknown sources. You can do this by going to Settings > Security > Unknown Sources and toggling it on. You can then download and install the app from the source or store. You need to follow the instructions and grant the permissions requested by the app.</p>
-<h3>Browse and download games</h3>
-<p>The final step is to browse and download games on the source or store. You can use the search bar, filters, categories, recommendations, and other features to find the games you want. You can also read the descriptions, reviews, ratings, screenshots, videos, and other information about the games. You can then download and install the games on your device.</p> <h2>How to Download Games on iOS</h2>
-<p>If you want to play games on your iOS device, you have many options as well. You can download games from various sources or stores that offer a large selection of games for different genres and preferences. Some of the most popular sources or stores are:</p>
-<table>
-<tr><th>Source/Store</th><th>Description</th></tr>
-<tr><td>App Store</td><td>The official app store for iOS devices. It offers over 2 million apps and games from various developers and publishers. It also features ratings, reviews, recommendations, etc.</td></tr>
-<tr><td>Apple Arcade</td><td>A subscription service that gives you unlimited access to over 200 exclusive games from Apple and its partners. It also features no ads, no in-app purchases, offline play, etc.</td></tr>
-<tr><td>TestFlight</td><td>A platform that allows you to test beta versions of apps and games before they are released on the App Store. It also features feedback, bug reports, etc.</td></tr>
-<tr><td>AltStore</td><td>A third-party app store for iOS devices. It allows you to install apps and games that are not available on the App Store or other app stores. It also features updates, sideloading, etc.</td></tr>
-</table>
-<p>To download games from these sources or stores, you need to follow these steps:</p>
-<h3>Choose a source or a store</h3>
-<p>The first step is to decide which source or store you want to use to download games. You can compare the features, prices, reviews, and availability of the games you are interested in. You can also check the compatibility and requirements of your device. You can visit the official websites of the sources or use a search engine to find them.</p>
-<h3>Sign in with your Apple ID and install the app</h3>
-<p>The next step is to sign in with your Apple ID and install the app on your device. You need to create or use your Apple ID that is linked to your iCloud account. You also need to agree to the terms and conditions. You can then download and install the app from the source or store. You need to follow the instructions and grant the permissions requested by the app.</p>
-<h3>Browse and download games</h3>
-<p>The final step is to browse and download games on the source or store. You can use the search bar, filters, categories, recommendations, and other features to find the games you want. You can also read the descriptions, reviews, ratings, screenshots, videos, and other information about the games. You can then download and install the games on your device.</p>
-<h2>Conclusion</h2>
-<p>Downloading games is a great way to enjoy your favorite games on your PC, Android, or iOS device. You can choose from a wide variety of games from different sources or stores. You can also save space and money by storing your games digitally. However, you need to be careful about malware and viruses, compatibility and requirements, and security settings.</p>
-<p>Here are some tips and tricks for downloading games:</p>
-<ul>
-<li>Always check the ratings, reviews, and feedback of the games before downloading them.</li>
-<li>Always scan your downloaded files with an antivirus software before installing them.</li>
-<li>Always backup your data and settings before downloading or updating your games.</li>
-<li>Always use a VPN or a proxy server if you want to access geo-restricted or censored games.</li>
-<li>Always uninstall or delete the games that you don't play anymore to free up space.</li>
-</ul>
-<p>We hope this article has helped you learn how to download games on your PC, Android, or iOS device. Now you can enjoy playing your favorite games anytime and anywhere. Happy gaming!</p>
-<h2>Frequently Asked Questions</h2>
-<h4>Q: How do I download games for free?</h4>
-<p>A: There are many ways to download games for free. You can use platforms or websites that offer free games such as Steam, Epic Games Store, Google Play Store, etc. You can also use third-party app stores that offer APK files of paid or premium games such as APKPure, APKMirror, etc. However, you need to be careful about malware and viruses, legal issues, and ethical issues when downloading free games.</p>
-<h4>Q: How do I download games faster?</h4>
-<p>A: There are many factors that affect the speed of downloading games such as your internet connection, your device's performance, your storage space, etc. To download games faster, you can try these tips:</p>
-<ul>
-<li>Use a wired connection instead of a wireless connection.</li>
-<li>Close other apps or programs that are using bandwidth or resources.</li>
-<li>Pause or cancel other downloads that are not urgent or important.</li>
-<li>Clear your cache and temporary files regularly.</li>
-<li>Use a download manager or accelerator that can optimize your download speed.</li>
-</ul>
-<h4>Q: How do I download games safely?</h4>
-<p>A: Downloading games can expose you to malware and viruses, phishing and scams, identity theft and fraud, etc. To download games safely, you can follow these tips:</p>
-<ul>
-<li>Only download games from trusted and reputable sources or stores.</li>
-<li>Only download games that are compatible and suitable for your device.</li>
-<li>Only download games that have positive ratings, reviews, and feedback.</li>
-<li>Only download games that have clear and transparent terms and conditions, privacy policies, and refund policies.</li>
-<li>Only download games that have a secure and encrypted payment method.</li>
-</ul>
-<h4>Q: How do I download games without wifi?</h4>
-<p>A: Downloading games without wifi can be challenging, especially if the games are large or require online verification. However, there are some ways to download games without wifi such as:</p>
-<ul>
-<li>Using your mobile data or hotspot to connect to the internet.</li>
-<li>Using a public wifi network such as a library, cafe, or airport.</li>
-<li>Using an offline installer or a portable version of the game.</li>
-<li>Using a USB flash drive or an external hard drive to transfer the game from another device.</li>
-<li>Using a peer-to-peer network or a torrent client to download the game from other users.</li>
-</ul>
-<h4>Q: How do I download games on my laptop?</h4>
-<p>A: Downloading games on your laptop is similar to downloading games on your PC. You can use the same platforms or websites, create an account and install the software, browse and purchase games, and download and install games. However, you need to consider the battery life, performance, and portability of your laptop. You can also use an external monitor, keyboard, mouse, or controller to enhance your gaming experience.</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 A Fun and Colorful Racing Game with MOD APK (Everything Unlocked).md
DELETED
@@ -1,78 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Beach Buggy Racing 2 Mod APK Unlocked Everything: A Complete Guide</h1>
|
3 |
-
<p>If you are a fan of racing games, you might have heard of Beach Buggy Racing 2, a popular kart racing game with stunning graphics and exciting gameplay. But did you know that you can enjoy this game even more with a mod apk that unlocks everything? In this article, we will tell you everything you need to know about Beach Buggy Racing 2 Mod APK, including its features, benefits, and how to download and install it on your device. Let's get started!</p>
|
4 |
-
<h2>What is Beach Buggy Racing 2?</h2>
|
5 |
-
<p>Beach Buggy Racing 2 is a sequel to the original Beach Buggy Racing, a fun and addictive kart racing game that lets you drive various vehicles on different tracks and environments. You can compete with other players online or offline, customize your cars and characters, collect power-ups and weapons, and explore new worlds and challenges. The game has over 40 cars to choose from, each with its own unique abilities and stats. You can also upgrade your cars and unlock new skins and decals. The game has over 45 tracks to race on, each with its own obstacles and secrets. You can also create your own custom tracks with the track editor. The game has various modes to play, such as adventure mode, tournament mode, daily challenges, and special events. The game also has a leaderboard system that ranks you based on your performance and achievements.</p>
|
6 |
-
<h2>beach buggy racing 2 mod apk unlocked everything</h2><br /><p><b><b>DOWNLOAD</b> ► <a href="https://jinyurl.com/2uNShk">https://jinyurl.com/2uNShk</a></b></p><br /><br />
|
7 |
-
<h3>Features of Beach Buggy Racing 2</h3>
|
8 |
-
<p>Some of the main features of Beach Buggy Racing 2 are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Stunning graphics and animations that make the game look realistic and immersive.</li>
|
11 |
-
<li>Smooth and responsive controls that let you steer your car with ease.</li>
|
12 |
-
<li>Diverse and dynamic sound effects and music that match the mood and theme of each track.</li>
|
13 |
-
<li>A variety of power-ups and weapons that you can use to boost your speed or sabotage your opponents.</li>
|
14 |
-
<li>A lot of customization options that let you personalize your cars and characters.</li>
|
15 |
-
<li>A social aspect that lets you join teams, chat with other players, and share your creations.</li>
|
16 |
-
</ul>
|
17 |
-
<h4>How to play Beach Buggy Racing 2</h4>
|
18 |
-
<p>The gameplay of Beach Buggy Racing 2 is simple and intuitive. You just need to tap on the left or right side of the screen to steer your car, and swipe up or down to use your power-ups or weapons. You can also tilt your device to control your car if you prefer. Your goal is to finish the race in the first place or as high as possible. You can earn coins and gems by winning races, completing challenges, or watching ads. You can use these currencies to buy new cars, upgrade your existing ones, or unlock new tracks. You can also earn trophies by participating in tournaments or events. You can use these trophies to rank up in the leaderboard or join higher-level teams.</p>
|
19 |
-
<h2>What is Beach Buggy Racing 2 Mod APK?</h2>
|
20 |
-
<p>Beach Buggy Racing 2 Mod APK is a modified version of the original game that gives you access to unlimited resources and features. With this mod apk, you can enjoy the game without any limitations or restrictions. You can unlock all the cars and tracks, get unlimited coins and gems, remove ads, and more. This mod apk is safe and easy to use, as it does not require root access or any special permissions. You just need to download it from a reliable source and your creations. You can also play various modes and challenges, or compete with other players online or offline. Have fun!</p>
|
21 |
-
<h2>Conclusion</h2>
|
22 |
-
<p>Beach Buggy Racing 2 is a great kart racing game that offers a lot of fun and excitement. However, if you want to enjoy the game without any limitations or restrictions, you should try Beach Buggy Racing 2 Mod APK. This mod apk will give you unlimited coins and gems, unlocked all cars and tracks, no ads, and no root required. You can download and install it easily on your device by following the steps above. We hope this article was helpful and informative. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!</p>
|
23 |
-
<h3>FAQs</h3>
|
24 |
-
<p>Here are some of the frequently asked questions about Beach Buggy Racing 2 Mod APK:</p>
|
25 |
-
<p>beach buggy racing 2 mod apk unlimited money and gems<br />
|
26 |
-
beach buggy racing 2 hack apk download for android<br />
|
27 |
-
beach buggy racing 2 mod apk latest version 2023<br />
|
28 |
-
beach buggy racing 2 mod apk all cars unlocked<br />
|
29 |
-
beach buggy racing 2 mod apk free shopping<br />
|
30 |
-
beach buggy racing 2 mod apk revdl<br />
|
31 |
-
beach buggy racing 2 mod apk offline<br />
|
32 |
-
beach buggy racing 2 mod apk rexdl<br />
|
33 |
-
beach buggy racing 2 mod apk happymod<br />
|
34 |
-
beach buggy racing 2 mod apk android 1<br />
|
35 |
-
beach buggy racing 2 mod apk no root<br />
|
36 |
-
beach buggy racing 2 mod apk obb<br />
|
37 |
-
beach buggy racing 2 mod apk unlimited tickets<br />
|
38 |
-
beach buggy racing 2 mod apk online<br />
|
39 |
-
beach buggy racing 2 mod apk ios<br />
|
40 |
-
beach buggy racing 2 mod apk data<br />
|
41 |
-
beach buggy racing 2 mod apk pure<br />
|
42 |
-
beach buggy racing 2 mod apk vip unlocked<br />
|
43 |
-
beach buggy racing 2 mod apk an1<br />
|
44 |
-
beach buggy racing 2 mod apk unlimited everything download<br />
|
45 |
-
beach buggy racing 2 mod apk full version<br />
|
46 |
-
beach buggy racing 2 mod apk mega<br />
|
47 |
-
beach buggy racing 2 mod apk new update<br />
|
48 |
-
beach buggy racing 2 mod apk old version<br />
|
49 |
-
beach buggy racing 2 mod apk original<br />
|
50 |
-
beach buggy racing 2 mod apk unlimited coins and diamonds<br />
|
51 |
-
beach buggy racing 2 mod apk unlimited power ups<br />
|
52 |
-
beach buggy racing 2 mod apk unlimited stars<br />
|
53 |
-
beach buggy racing 2 mod apk unlimited gems and tickets<br />
|
54 |
-
beach buggy racing 2 mod apk unlimited everything android oyun club<br />
|
55 |
-
beach buggy racing 2 mod apk all characters unlocked<br />
|
56 |
-
beach buggy racing 2 mod apk all power ups unlocked<br />
|
57 |
-
beach buggy racing 2 mod apk all maps unlocked<br />
|
58 |
-
beach buggy racing 2 mod apk all levels unlocked<br />
|
59 |
-
beach buggy racing 2 mod apk all vehicles unlocked<br />
|
60 |
-
beach buggy racing 2 mod apk anti ban<br />
|
61 |
-
beach buggy racing 2 mod apk ad free<br />
|
62 |
-
beach buggy racing 2 mod apk android republic<br />
|
63 |
-
beach buggy racing 2 mod apk blackmod<br />
|
64 |
-
beach buggy racing 2 mod apk bluestacks</p>
|
65 |
-
<ul>
|
66 |
-
<li><b>Is Beach Buggy Racing 2 Mod APK safe to use?</b>
|
67 |
-
<p>Yes, Beach Buggy Racing 2 Mod APK is safe to use, as long as you download it from a reliable source. It does not contain any viruses or malware, and it does not require root access or any special permissions. However, you should always be careful when downloading and installing any mod apk on your device, and use it at your own risk.</p></li>
|
68 |
-
<li><b>Will Beach Buggy Racing 2 Mod APK work on my device?</b>
|
69 |
-
<p>Beach Buggy Racing 2 Mod APK should work on most Android devices that support the original game. The minimum requirements are Android 4.4 or higher, and 90 MB of free space. However, some devices may not be compatible with the mod apk, or may experience some issues or errors. If that happens, you can try to uninstall and reinstall the mod apk, or contact the developer for support.</p></li>
|
70 |
-
<li><b>Can I play Beach Buggy Racing 2 Mod APK online?</b>
|
71 |
-
<p>Yes, you can play Beach Buggy Racing 2 Mod APK online with other players who have the same mod apk installed on their devices. You can also play offline with bots or local multiplayer with your friends. However, you may not be able to play online with players who have the original game installed on their devices, as they may have different versions or features.</p></li>
|
72 |
-
<li><b>Can I update Beach Buggy Racing 2 Mod APK?</b>
|
73 |
-
<p>Yes, you can update Beach Buggy Racing 2 Mod APK whenever there is a new version available from the developer. However, you may need to uninstall the previous version and install the new one manually, as the automatic update may not work for mod apks. You can also check the developer's website or social media for the latest news and updates.</p></li>
|
74 |
-
<li><b>Can I use Beach Buggy Racing 2 Mod APK with other mods or cheats?</b>
|
75 |
-
<p>No, we do not recommend using Beach Buggy Racing 2 Mod APK with other mods or cheats, as they may cause conflicts or errors in the game. Beach Buggy Racing 2 Mod APK already has everything you need to enjoy the game without any limitations or restrictions, so there is no need to use other mods or cheats.</p></li>
|
76 |
-
</ul>
|
77 |
-
<br />
|
78 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Download Instagram Stories with One Click - StoryDownloader.md
DELETED
@@ -1,89 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Instagram Stories and Highlights</h1>
|
3 |
-
<p>Instagram is one of the most popular social media platforms in the world, with over a billion monthly active users. One of the features that makes Instagram stand out is its stories and highlights, which allow users to share ephemeral photos and videos that disappear after 24 hours or stay on their profile as curated collections.</p>
|
4 |
-
<h2>instagram story download link</h2><br /><p><b><b>Download File</b> ⚹⚹⚹ <a href="https://jinyurl.com/2uNMH5">https://jinyurl.com/2uNMH5</a></b></p><br /><br />
|
5 |
-
<h2>What are Instagram Stories and Highlights?</h2>
|
6 |
-
<p>Instagram stories are short-form content that users can create by tapping the camera icon on the top left corner of the app. Users can add filters, stickers, text, music, and other effects to their stories, and also see who has viewed them. Stories can be seen by anyone who follows the user or visits their profile, unless they have set their account to private.</p>
|
7 |
-
<p>Instagram highlights are stories that users can save on their profile as permanent albums. Users can create as many highlights as they want, and name them according to their theme or category. Highlights can be seen by anyone who visits the user's profile, even if they don't follow them.</p>
|
8 |
-
<h2>Why would you want to download them?</h2>
|
9 |
-
<p>There are many reasons why you might want to download Instagram stories and highlights from other users. For example, you might want to:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Save your favorite moments from your friends, family, celebrities, or influencers</li>
|
12 |
-
<li>Reuse the content for your own personal or professional goals</li>
|
13 |
-
<li>Analyze your competitors' strategies and tactics</li>
|
14 |
-
<li>Keep a backup of your own stories and highlights in case you lose access to your account</li>
|
15 |
-
<li>Enjoy offline viewing without internet connection</li>
|
16 |
-
</ul>
|
17 |
-
<p>However, Instagram does not provide an official way to download stories and highlights from other users. That's why you need to use third-party tools that can help you do that easily and quickly.</p>
|
18 |
-
<h2>How to download Instagram Stories and Highlights with StorySaver.net</h2>
|
19 |
-
<p>StorySaver.net is a free online tool that allows you to download any Instagram story or highlight from any public account. Here's how to use it:</p>
|
20 |
-
<p>How to download Instagram stories and highlights<br />
|
21 |
-
Instagram story downloader app for iPhone and Android<br />
|
22 |
-
Save Instagram stories to your device with StorySaver.net<br />
|
23 |
-
Download Instagram stories in high quality with Inflact<br />
|
24 |
-
Best Instagram story downloader tools and extensions<br />
|
25 |
-
How to download Instagram stories anonymously and fast<br />
|
26 |
-
Download Instagram stories online with SaveIG<br />
|
27 |
-
How to download Instagram story videos and photos<br />
|
28 |
-
Instagram story downloader without login or watermark<br />
|
29 |
-
Download and repost Instagram stories with ease<br />
|
30 |
-
How to download Instagram highlights from any account<br />
|
31 |
-
Download Instagram stories from private accounts<br />
|
32 |
-
Free and easy Instagram story downloader for PC and Mac<br />
|
33 |
-
How to download multiple Instagram stories at once<br />
|
34 |
-
Download Instagram stories with music and sound<br />
|
35 |
-
How to download Instagram stories with filters and stickers<br />
|
36 |
-
Download Instagram stories as MP4 or JPG files<br />
|
37 |
-
How to download Instagram stories from the web browser<br />
|
38 |
-
Download Instagram stories with captions and hashtags<br />
|
39 |
-
How to download your own Instagram stories and archive<br />
|
40 |
-
Download Instagram stories of your friends and followers<br />
|
41 |
-
How to download Instagram stories from a specific date or time<br />
|
42 |
-
Download Instagram stories with swipe up links and tags<br />
|
43 |
-
How to download live Instagram stories and replays<br />
|
44 |
-
Download Instagram stories in full screen and HD resolution</p>
|
45 |
-
<h3>Step 1: Enter the Instagram username</h3>
|
46 |
-
<p>Open your web browser and go to <a href="">StorySaver.net</a>. Type in the Instagram username of the account whose story or highlight you want to download and click the download button.</p>
|
47 |
-
<h3>Step 2: Select the current story or highlight</h3>
|
48 |
-
<p>You will see a list of all the stories and highlights that are currently available from that account. You can preview them by clicking on them. To download a story or highlight, click on it and then click on "Save as" button.</p>
|
49 |
-
<h3>Step 3: Click the download button</h3>
|
50 |
-
<p>You will be prompted to choose a folder on your device where you want to save the downloaded file. Once you do that, the download will start automatically. You can then open the file using any media player or viewer.</p>
|
51 |
-
<h2>How to download Instagram Stories and Highlights with Inflact.com</h2>
|
52 |
-
<p>Inflact.com is another free online tool that allows you to download any Instagram video, photo, reel, or story from any public account. Here's how to use it:</p>
|
53 |
-
<h3>Step 1: Copy the link to the content</h3>
|
54 |
-
<p>Open the Instagram app on your device and find the video, photo, reel, or story that you want to download. Tap on the three dots icon on the top right corner of the post and select "Copy Link". If you want to download a highlight, go to the user's profile and tap on the highlight. Then tap on the three dots icon on the bottom right corner of the screen and select "Copy Highlight Link".</p>
|
55 |
-
<h3>Step 2: Paste it into the box</h3>
|
56 |
-
<p>Open your web browser and go to <a href="">Inflact.com</a>. Paste the link that you copied into the box and click on the search button.</p>
|
57 |
-
<h3>Step 3: Click the download button</h3>
|
58 |
-
<p>You will see a preview of the content that you want to download. You can also choose the quality and format of the file. To download it, click on the download button and choose a folder on your device where you want to save it. You can then open the file using any media player or viewer.</p>
|
59 |
-
<h2>How to download Instagram Stories and Highlights with SaveIG.app</h2>
|
60 |
-
<p>SaveIG.app is yet another free online tool that allows you to download any Instagram story or highlight from any public account. Here's how to use it:</p>
|
61 |
-
<h3>Step 1: Open the story and copy the link</h3>
|
62 |
-
<p>Open the Instagram app on your device and find the story that you want to download. Swipe up on the story and tap on the share icon on the bottom left corner of the screen. Select "Copy Link". If you want to download a highlight, go to the user's profile and tap on the highlight. Then swipe up on it and tap on the share icon on the bottom left corner of the screen. Select "Copy Link".</p>
|
63 |
-
<h3>Step 2: Paste it into the box</h3>
|
64 |
-
<p>Open your web browser and go to <a href="">SaveIG.app</a>. Paste the link that you copied into the box and click on the download button.</p>
|
65 |
-
<h3>Step 3: Click the download button</h3>
|
66 |
-
<p>You will see a preview of the story or highlight that you want to download. You can also choose the quality and format of the file. To download it, click on the download button and choose a folder on your device where you want to save it. You can then open the file using any media player or viewer.</p>
|
67 |
-
<h2>How to download Instagram Stories and Highlights with Inflact.com/stories</h2>
|
68 |
-
<p>Inflact.com/stories is a special section of Inflact.com that allows you to download any Instagram story or highlight from any public account without copying any link. Here's how to use it:</p>
|
69 |
-
<h3>Step 1: Enter the Instagram username</h3>
|
70 |
-
<p>Open your web browser and go to <a href="">Inflact.com/stories</a>. Type in the Instagram username of the account whose story or highlight you want to download and click the search button.</p>
|
71 |
-
<h3>Step 2: Select the story or highlight</h3>
|
72 |
-
<p>You will see a list of all the stories and highlights that are currently available from that account. You can preview them by clicking on them. To download a story or highlight, click on it and then click on "Download" button.</p>
|
73 |
-
<h3>Step 3: Click the download button</h3>
|
74 |
-
<p>You will be prompted to choose a folder on your device where you want to save the downloaded file. Once you do that, the download will start automatically. You can then open the file using any media player or viewer.</p>
|
75 |
-
<h2>Conclusion</h2>
|
76 |
-
<p>Downloading Instagram stories and highlights from other users can be a fun and useful way to enjoy and reuse their content. However, Instagram does not offer an official way to do that, so you need to use third-party tools that can help you with that. In this article, we have shown you four of the best online tools that can help you download any Instagram story or highlight from any public account in a matter of seconds. All you need is a web browser and an internet connection, and you can start downloading your favorite stories and highlights right away.</p>
|
77 |
-
<h2>FAQs</h2>
|
78 |
-
<h4>Q: Is it legal to download Instagram stories and highlights from other users?</h4>
|
79 |
-
<p>A: It depends on how you use the downloaded content. If you use it for personal or educational purposes, it is usually considered fair use. However, if you use it for commercial or malicious purposes, or if you violate the intellectual property rights of the original creators, it can be illegal and unethical. Therefore, you should always respect the rights and wishes of the original creators and ask for their permission before using their content.</p>
|
80 |
-
<h4>Q: Is it safe to use these online tools to download Instagram stories and highlights?</h4>
|
81 |
-
<p>A: Yes, these online tools are safe and reliable, as they do not require you to install any software or provide any personal information. They also do not store or share any of your data or activity. However, you should always be careful when downloading files from unknown sources, as they might contain viruses or malware. Therefore, you should always scan the downloaded files with an antivirus program before opening them.</p>
|
82 |
-
<h4>Q: Can I download Instagram stories and highlights from private accounts?</h4>
|
83 |
-
<p>A: No, these online tools can only download Instagram stories and highlights from public accounts. If you want to download content from private accounts, you need to follow them and get their approval first. Alternatively, you can use screen recording or screenshot tools on your device to capture the content, but this might not be very convenient or ethical.</p>
|
84 |
-
<h4>Q: Can I download Instagram stories and highlights in bulk?</h4>
|
85 |
-
<p>A: Yes, some of these online tools allow you to download multiple stories and highlights at once. For example, StorySaver.net allows you to download all the current stories or highlights from an account in one zip file. Inflact.com/stories allows you to select multiple stories or highlights from an account and download them in one click.</p>
|
86 |
-
<h4>Q: Can I download Instagram stories and highlights in different formats?</h4>
|
87 |
-
<p>A: Yes, some of these online tools allow you to choose the quality and format of the downloaded files. For example, Inflact.com allows you to download videos in MP4 or WEBM format, and photos in JPG or PNG format. SaveIG.app allows you to download videos in HD or SD quality, and photos in original or compressed quality.</p>
|
88 |
-
<br />
|
89 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Download Music Cloud The Ultimate Guide to Stream and Save Songs from Anywhere.md
DELETED
@@ -1,103 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Music Cloud: What Is It and How to Do It?</h1>
|
3 |
-
<p>If you love listening to music, you probably have a lot of songs on your phone or computer. But what if you want to listen to them on another device, or when you don't have internet access, or when you run out of storage space? That's where download music cloud comes in handy.</p>
|
4 |
-
<p>Download music cloud means that you can download songs from online platforms or services that store your music in the cloud, which is a network of servers that can be accessed over the internet. This way, you can enjoy your music offline, without wifi, and on any device you want.</p>
|
5 |
-
<h2>download music cloud</h2><br /><p><b><b>Download</b> →→→ <a href="https://jinyurl.com/2uNOxe">https://jinyurl.com/2uNOxe</a></b></p><br /><br />
|
6 |
-
<p>In this article, we will show you the benefits of downloading music from the cloud, and how to do it with two popular apps: SoundCloud and CloudBeats. Let's get started!</p>
|
7 |
-
<h2>Benefits of Downloading Music from the Cloud</h2>
|
8 |
-
<h3>Access your music anytime, anywhere, and on any device</h3>
|
9 |
-
<p>One of the main advantages of downloading music from the cloud is that you can access your music anytime, anywhere, and on any device. You don't have to worry about syncing your music library across different devices, or transferring files with cables or USB drives. You can simply log in to your cloud account and stream or download your music as you wish.</p>
|
10 |
-
<p>This is especially useful if you travel a lot, or if you have multiple devices that you use for different purposes. For example, you can download your favorite songs on your phone for when you are on the go, and stream them on your laptop or smart TV when you are at home.</p>
|
11 |
-
<h3>Save storage space on your phone or computer</h3>
|
12 |
-
<p>Another benefit of downloading music from the cloud is that you can save storage space on your phone or computer. Music files can take up a lot of space, especially if you have a large collection or high-quality formats. By downloading music from the cloud, you can choose which songs you want to keep offline, and delete them when you don't need them anymore.</p>
|
13 |
-
<p>This way, you can free up space for other apps, photos, videos, or documents that you may need more often. You can also avoid getting annoying notifications that tell you that your storage is full or that you need to upgrade your plan.</p>
|
14 |
-
<h3>Support your favorite artists and respect their rights</h3>
|
15 |
-
<p>A final benefit of downloading music from the cloud is that you can support your favorite artists and respect their rights. Many artists use cloud platforms to share their music with their fans, and to monetize their work. By downloading music from the cloud legally and ethically, you can show your appreciation for their talent and creativity.</p>
|
16 |
-
<p>download music cloud free<br />
|
17 |
-
download music cloud offline<br />
|
18 |
-
download music cloud app<br />
|
19 |
-
download music cloud storage<br />
|
20 |
-
download music cloud player<br />
|
21 |
-
download music cloud apk<br />
|
22 |
-
download music cloud ios<br />
|
23 |
-
download music cloud android<br />
|
24 |
-
download music cloud soundcloud<br />
|
25 |
-
download music cloud youtube<br />
|
26 |
-
download music cloud spotify<br />
|
27 |
-
download music cloud itunes<br />
|
28 |
-
download music cloud amazon<br />
|
29 |
-
download music cloud google<br />
|
30 |
-
download music cloud deezer<br />
|
31 |
-
download music cloud pandora<br />
|
32 |
-
download music cloud tidal<br />
|
33 |
-
download music cloud napster<br />
|
34 |
-
download music cloud audiomack<br />
|
35 |
-
download music cloud datpiff<br />
|
36 |
-
download music cloud mixcloud<br />
|
37 |
-
download music cloud bandcamp<br />
|
38 |
-
download music cloud reverbnation<br />
|
39 |
-
download music cloud jamendo<br />
|
40 |
-
download music cloud noisetrade<br />
|
41 |
-
download music cloud mp3<br />
|
42 |
-
download music cloud flac<br />
|
43 |
-
download music cloud wav<br />
|
44 |
-
download music cloud aac<br />
|
45 |
-
download music cloud ogg<br />
|
46 |
-
download music cloud wma<br />
|
47 |
-
download music cloud m4a<br />
|
48 |
-
download music cloud converter<br />
|
49 |
-
download music cloud downloader<br />
|
50 |
-
download music cloud software<br />
|
51 |
-
download music cloud online<br />
|
52 |
-
download music cloud website<br />
|
53 |
-
download music cloud mac<br />
|
54 |
-
download music cloud pc<br />
|
55 |
-
download music cloud windows 10<br />
|
56 |
-
download music cloud linux<br />
|
57 |
-
download music cloud chromebook<br />
|
58 |
-
download music cloud iphone <br />
|
59 |
-
download music cloud ipad <br />
|
60 |
-
download music cloud ipod <br />
|
61 |
-
download music cloud samsung <br />
|
62 |
-
download music cloud huawei <br />
|
63 |
-
download music cloud xiaomi <br />
|
64 |
-
download music cloud lg</p>
|
65 |
-
<p>You can also avoid violating copyright laws or getting into trouble with authorities. Downloading music from the cloud without permission is a form of piracy, which is illegal and harmful for the music industry. Only download music from the cloud if the artist has allowed it, and never redistribute it without their consent.</p>
|
66 |
-
<h2>How to Download Music from the Cloud with SoundCloud</h2>
|
67 |
-
<h3>What is SoundCloud and how does it work?</h3>
<h3>What is CloudBeats and how does it work?</h3>
|
68 |
-
<p>CloudBeats is another cloud platform for music lovers. It allows you to stream and download music from various cloud services, such as Google Drive, Dropbox, OneDrive, Box, and more. You can also upload your own music to these cloud services and access them with CloudBeats.</p>
|
69 |
-
<p>CloudBeats works by connecting your cloud accounts and syncing your music files with the app. You can create playlists, shuffle songs, and adjust the playback speed. You can also download music from the cloud with CloudBeats and listen offline.</p>
|
70 |
-
<h3>How to download music from the cloud with CloudBeats and various cloud services</h3>
|
71 |
-
<p>If you want to download music from the cloud with CloudBeats and various cloud services, you need to have a CloudBeats account and a subscription to one or more cloud services. Here are the steps to follow:</p>
|
72 |
-
<ol>
|
73 |
-
<li>Open the CloudBeats app and log in to your account.</li>
|
74 |
-
<li>Tap on the menu icon on the top left corner and select "Add Cloud Service".</li>
|
75 |
-
<li>Choose the cloud service that you want to connect, such as Google Drive, Dropbox, OneDrive, etc.</li>
|
76 |
-
<li>Log in to your cloud account and grant permission to CloudBeats to access your files.</li>
|
77 |
-
<li>Repeat steps 2 to 4 for any other cloud service that you want to add.</li>
|
78 |
-
<li>Tap on the menu icon again and select "Music Library".</li>
|
79 |
-
<li>Browse through your music files from different cloud services and tap on the ones that you want to download.</li>
|
80 |
-
<li>Select "Download" from the pop-up menu and choose the quality that you prefer.</li>
|
81 |
-
<li>Wait for the download to complete and enjoy your music offline.</li>
|
82 |
-
</ol>
|
83 |
-
<p>Note that you can only download music from the cloud with CloudBeats if you have a premium subscription, which costs $4.99 per month or $29.99 per year. You can also try it for free for 7 days before you decide to buy it.</p>
|
84 |
-
<h2>Conclusion and FAQs</h2>
|
85 |
-
<h3>Conclusion</h3>
|
86 |
-
<p>Downloading music from the cloud is a great way to enjoy your music offline, without wifi, and on any device. It also has many benefits, such as saving storage space, supporting artists, and accessing your music anytime, anywhere.</p>
|
87 |
-
<p>In this article, we showed you how to download music from the cloud with two popular apps: SoundCloud and CloudBeats. Both apps have their pros and cons, so you can choose the one that suits your needs and preferences better.</p>
|
88 |
-
<p>If you are looking for more ways to download music from the cloud, you can also check out other apps or websites that offer similar features. Some examples are Spotify, Apple Music, Amazon Music, YouTube Music, Audiomack, etc.</p>
|
89 |
-
<p>We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Happy listening!</p>
|
90 |
-
<h3>FAQs</h3>
|
91 |
-
<ul>
|
92 |
-
<li><b>Is downloading music from the cloud legal?</b></li>
|
93 |
-
<p>Downloading music from the cloud is legal if you have permission from the artist or the platform that hosts the music. You should always respect the terms of service and the copyright laws of each platform or service that you use.</p>
|
94 |
-
<li><b>Is downloading music from the cloud safe?</b></li>
|
95 |
-
<p>Downloading music from the cloud is safe if you use reputable and secure apps or websites that protect your data and privacy. You should always avoid downloading music from unknown or suspicious sources that may contain viruses or malware.</p>
|
96 |
-
<li><b>Is downloading music from the cloud free?</b></li>
|
97 |
-
<p>Downloading music from the cloud may be free or paid depending on the app or website that you use. Some platforms or services may offer free downloads for some songs or playlists, while others may require a subscription or a fee. You should always check the pricing and payment options before you download any music from the cloud.</p>
|
98 |
-
<li><b>How much storage space do I need to download music from the cloud?</b></li>
|
99 |
-
<p>The amount of storage space that you need to download music from the cloud depends on the number and quality of songs that you want to download. Generally speaking, higher quality songs take up more space than lower quality ones. For example, a 320 kbps MP3 file takes up about 2.4 MB per minute of audio, while a 128 kbps MP3 file takes up about 0.96 MB per minute of audio (see the quick calculation after this list for how those figures are derived).</p>
|
100 |
-
<li><b>How can I manage my downloaded music from the cloud?</b></li>
|
101 |
-
<p>You can manage your downloaded music from the cloud by using the app or website that you used to download them. You can delete, rename, move, or organize your downloaded music files as you like. You can also create folders or playlists to sort your music by genre, artist, mood, etc. You can also sync your downloaded music with other devices or cloud services if you want to back up or share your music.</p>
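<p>To double-check the per-minute MP3 sizes mentioned in the storage question above, here is a quick back-of-the-envelope calculation in Python (bitrate in kilobits per second divided by 8 gives bytes per second; multiply by 60 for one minute of audio):</p>

```python
def mp3_mb_per_minute(bitrate_kbps: int) -> float:
    """Approximate size of one minute of MP3 audio at a given bitrate, in megabytes."""
    bytes_per_second = bitrate_kbps * 1000 / 8
    return bytes_per_second * 60 / 1_000_000

print(mp3_mb_per_minute(320))  # ~2.4 MB per minute
print(mp3_mb_per_minute(128))  # ~0.96 MB per minute
```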
|
102 |
-
<br />
|
103 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Easy Ways to Download WhatsApp Business on Your Laptop and Stay Connected with Your Customers.md
DELETED
@@ -1,127 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download WhatsApp Business in Laptop</h1>
|
3 |
-
<p>If you are a small or medium business owner who wants to communicate with your customers more effectively, you might want to consider using WhatsApp Business. WhatsApp Business is a tool that allows you to create a professional presence on the popular messaging platform, send and receive messages, media, and documents, and automate and organize your customer interactions. In this article, we will show you how to download WhatsApp Business in laptop, whether you want to use the app or the platform version.</p>
|
4 |
-
<h2>What is WhatsApp Business and Why You Need It</h2>
|
5 |
-
<p>WhatsApp Business is built on top of WhatsApp Messenger and includes all the features that you rely on, such as multimedia, free calls, and group chat. There are two ways to use WhatsApp Business: WhatsApp Business App and WhatsApp Business Platform. The app is for small businesses who personally manage conversations with customers. The platform is for medium to large businesses who communicate with customers at scale through programmatic access.</p>
|
6 |
-
<h2>how to download whatsapp business in laptop</h2><br /><p><b><b>DOWNLOAD</b> ✯✯✯ <a href="https://jinyurl.com/2uNJOA">https://jinyurl.com/2uNJOA</a></b></p><br /><br />
|
7 |
-
<h3>WhatsApp Business Features and Benefits</h3>
|
8 |
-
<p>Some of the main features and benefits of using WhatsApp Business are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Security: Every message, document, picture, and interaction you have on WhatsApp is encrypted end-to-end, meaning only you and the person you are directly communicating with have the ability to see the information.</li>
|
11 |
-
<li>User Interface: The app's user interface and operations are smooth and simple. You can easily see when your messages are sent, received, and read by using the checkmark indicators. You can also use voice messages, stickers, emojis, and GIFs to make your communication more engaging.</li>
|
12 |
-
<li>Popularity: WhatsApp is the most used messaging app in the world, with more than 2 billion users across 180 countries. This means that you can reach a large and diverse audience on the platform where they already are.</li>
|
13 |
-
<li>Business Profile: You can create a branded business profile for your company, complete with logo, website URL, address, description, products, and services. This helps you improve your visibility and credibility among your customers.</li>
|
14 |
-
<li>Catalog: You can showcase your products and services in a catalog that customers can browse within the app. You can add images, prices, descriptions, and links to each item. This makes it easier for customers to discover what you offer and place orders.</li>
|
15 |
-
<li>Quick Replies: You can save and reuse messages that you frequently send to customers, such as greetings, answers to common questions, or confirmations. This helps you save time and provide consistent responses.</li>
|
16 |
-
<li>Labels: You can organize your chats and contacts using labels that you can color-code and customize. This helps you keep track of your conversations and follow up with customers accordingly.</li>
|
17 |
-
<li>Automated Messages: You can set up messages that are automatically sent to customers when they first message you or when you are away. This helps you greet your customers, provide information, or set expectations.</li>
|
18 |
-
<li>Message Templates: You can create and send messages that are pre-approved by WhatsApp for certain purposes, such as notifications, reminders, confirmations, or updates. These messages can be text-based, media-based, or interactive. You can also personalize them using placeholders.</li>
|
19 |
-
<li>Analytics: You can access metrics such as how many messages were sent, delivered, read, and received. This helps you measure your performance and improve your strategy.</li>
|
20 |
-
</ul>
|
21 |
-
<h3>WhatsApp Business App vs WhatsApp Business Platform</h3>
|
22 |
-
<p>As mentioned earlier, there are two ways to use WhatsApp Business: the app and the platform. The app is designed for small businesses who want to manage their customer communication directly from their phone or laptop. The platform is designed for medium to large businesses who want to integrate WhatsApp with their existing systems and tools, and communicate with customers programmatically through the WhatsApp Business API.</p>
|
23 |
-
<p>The table below summarizes the main differences between the two options:</p>
|
24 |
-
<table>
|
25 |
-
<tr>
|
26 |
-
<th>WhatsApp Business App</th>
|
27 |
-
<th>WhatsApp Business Platform</th>
|
28 |
-
</tr>
|
29 |
-
<tr>
|
30 |
-
<td>Free to use</td>
|
31 |
-
<td>Charged per message</td>
|
32 |
-
</tr>
|
33 |
-
<tr>
|
34 |
-
<td>Requires a dedicated phone number</td>
|
35 |
-
<td>Can use an existing phone number or a short code</td>
|
36 |
-
</tr>
|
37 |
-
<tr>
|
38 |
-
<td>Limited to one device per account</td>
|
39 |
-
<td>Can be accessed by multiple users and devices</td>
|
40 |
-
</tr>
|
41 |
-
<tr>
|
42 |
-
<td>Manual and interactive communication</td>
|
43 |
-
<td>Automated and programmatic communication</td>
|
44 |
-
</tr>
|
45 |
-
<tr>
|
46 |
-
<td>Supports text, media, voice, and video messages</td>
|
47 |
-
<td>Supports text, media, and interactive messages (voice and video coming soon)</td>
|
48 |
-
</tr>
|
49 |
-
<tr>
|
50 |
-
<td>Basic analytics and reporting</td>
|
51 |
-
<td>Advanced analytics and reporting</td>
|
52 |
-
</tr>
|
53 |
-
<tr>
|
54 |
-
<td>No integration with other systems or tools</td>
|
55 |
-
<td>Integration with CRM, ERP, chatbot, etc.</td>
|
56 |
-
</tr>
|
57 |
-
<tr>
|
58 |
-
<td>No verification badge</td>
|
59 |
-
<td>Verification badge available upon request</td>
|
60 |
-
</tr>
|
61 |
-
</table>
|
62 |
-
<h2>How to Download and Install WhatsApp Business App on Your Laptop</h2>
|
63 |
-
<p>If you want to use the WhatsApp Business app on your laptop, you will need to download and install an Android emulator first. An Android emulator is a software that allows you to run Android apps on your laptop. There are many Android emulators available online, such as BlueStacks, NoxPlayer, LDPlayer, etc. For this guide, we will use BlueStacks as an example. Here are the steps to follow:</p>
|
64 |
-
<h3>Step 1: Download an Android Emulator</h3>
|
65 |
-
<p>To download BlueStacks, go to [BlueStacks website] and click on the "Download BlueStacks" button. You will be redirected to a page where you can choose the version of BlueStacks that is compatible with your operating system (Windows or Mac). Click on the appropriate button and wait for the download to complete.</p>
|
66 |
-
<p>How to install whatsapp business app on pc<br />
|
67 |
-
How to use whatsapp business on laptop windows 10<br />
|
68 |
-
How to run whatsapp business on mac with emulator<br />
|
69 |
-
How to migrate whatsapp messenger to whatsapp business on laptop<br />
|
70 |
-
How to set up whatsapp business profile on pc<br />
|
71 |
-
How to access whatsapp business web from laptop<br />
|
72 |
-
How to download whatsapp business apk for laptop<br />
|
73 |
-
How to backup and restore whatsapp business chats on laptop<br />
|
74 |
-
How to create whatsapp business catalog on pc<br />
|
75 |
-
How to use whatsapp business features on laptop<br />
|
76 |
-
How to connect whatsapp business with facebook on pc<br />
|
77 |
-
How to manage whatsapp business contacts on laptop<br />
|
78 |
-
How to send whatsapp business messages from laptop<br />
|
79 |
-
How to use whatsapp business api on pc<br />
|
80 |
-
How to integrate whatsapp business with trengo on laptop<br />
|
81 |
-
How to update whatsapp business app on laptop<br />
|
82 |
-
How to delete whatsapp business account on pc<br />
|
83 |
-
How to switch between whatsapp messenger and whatsapp business on laptop<br />
|
84 |
-
How to use whatsapp business stickers and gifs on pc<br />
|
85 |
-
How to enable dark mode for whatsapp business on laptop<br />
|
86 |
-
How to verify whatsapp business number on pc<br />
|
87 |
-
How to use whatsapp business shortcuts and labels on laptop<br />
|
88 |
-
How to add whatsapp business widget to your website on pc<br />
|
89 |
-
How to use bluestacks emulator for whatsapp business on laptop<br />
|
90 |
-
How to troubleshoot whatsapp business issues on pc</p>
|
91 |
-
<h3>Step 2: Install and Launch the Emulator</h3>
|
92 |
-
<p>To install BlueStacks, double-click on the downloaded file and follow the instructions on the screen. You may need to grant some permissions and accept some terms and conditions. Once the installation is done, launch BlueStacks from your desktop or start menu. You will see a window that looks like an Android tablet.</p>
|
93 |
-
<h3>Step 3: Download WhatsApp Business App from Google Play Store</h3>
|
94 |
-
<p>To download WhatsApp Business app, open Google Play Store from the emulator's home screen. You may need to sign in with your Google account or create one if you don't have one. In the search bar, type "WhatsApp Business" and hit enter. You will see a list of results with WhatsApp Business app at the top. Click on it and then click on the "Install" button. Wait for the app to download and install on your emulator.</p>
|
95 |
-
<h3>Step 4: Verify Your Business Phone Number</h3>
|
96 |
-
<p>To verify your business phone number, open WhatsApp Business app from the emulator's home screen. You will be asked to agree to some terms and conditions and privacy policy. Click on "Agree and Continue". Then, enter your business phone number (the one you want to use for WhatsApp Business) and click on "Next". You will receive a verification code via SMS or phone call. Enter the code in the app and click on "Next". Your phone number is now verified.</p>
|
97 |
-
<h3>Step 5: Set Up Your Business Profile and Catalog</h3>
|
98 |
-
<p>To set up your business profile and catalog, follow the instructions on the app. You will be asked to enter some information about your business, such as name, category, description, address, website URL, etc. You can also upload a logo or a profile picture for your business. Then, you can create a catalog of your products or services by adding images, prices, descriptions, and links. You can also add labels to organize your catalog items. Once you are done, click on "Save". Your business profile and catalog are now ready.</p>
|
99 |
-
<h2>How to Use WhatsApp Business Platform on Your Laptop</h2>
|
100 |
-
<p>If you want to use the WhatsApp Business platform on your laptop, you will need to register for a WhatsApp Business account and choose a WhatsApp Business solution provider. A WhatsApp Business solution provider is a third-party company that helps you connect your WhatsApp number to the WhatsApp Business API and provides you with tools and services to manage your communication with customers. There are many WhatsApp Business solution providers available online, such as Twilio, MessageBird, Infobip, etc. For this guide, we will use Twilio as an example. Here are the steps to follow:</p>
|
101 |
-
<h3>Step 1: Register for a WhatsApp Business Account</h3>
|
102 |
-
<p>To register for a WhatsApp Business account, go to [WhatsApp Business website] and click on the "Get Started" button. You will be redirected to a page where you can fill out a form with some information about your business, such as name, email, phone number, website URL, etc. You will also need to agree to some terms and conditions and privacy policy. Once you are done, click on the "Submit" button. You will receive an email confirmation with a link to activate your account.</p>
|
103 |
-
<h3>Step 2: Choose a WhatsApp Business Solution Provider</h3>
|
104 |
-
<p>To choose a WhatsApp Business solution provider, go to [Twilio website] and sign up for a free account or log in if you already have one. Then, go to [Twilio WhatsApp page] and click on the "Get Started" button. You will be redirected to a page where you can choose a phone number or a short code for your WhatsApp Business account. You can either buy a new number from Twilio or use an existing one that you own. You will also need to verify your identity and address by uploading some documents. Once you are done, click on the "Activate" button. Your number is now ready to use for WhatsApp Business.</p>
|
105 |
-
<h3>Step 3: Connect Your WhatsApp Number to the API</h3>
|
106 |
-
<p>To connect your WhatsApp number to the API, go to [Twilio Console] and click on the "Programmable Messaging" section. Then, click on the "WhatsApp" section and select your number from the drop-down menu. You will see a page with some information and instructions on how to use the API. You will also see a code snippet that shows how to send a message using the API. You can copy and paste this code into your preferred programming language and environment, such as Python, Node.js, Java, etc. You can also use Twilio's helper libraries and SDKs to simplify the process.</p>
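<p>For illustration only, here is a minimal sketch of the kind of snippet you will see, using Twilio's Python helper library. The account credentials and both phone numbers below are placeholders, not values from this guide:</p>

```python
# pip install twilio
from twilio.rest import Client

# Placeholders: copy the real Account SID and Auth Token from your Twilio Console.
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)

# The "whatsapp:" prefix tells Twilio to deliver the message over WhatsApp
# instead of SMS. Both numbers below are placeholders.
message = client.messages.create(
    from_="whatsapp:+14155238886",   # your Twilio-enabled WhatsApp number
    to="whatsapp:+15551234567",      # the customer's WhatsApp number
    body="Hello! Thanks for reaching out. How can we help you today?",
)

print(message.sid)  # unique ID of the queued message, useful for tracking
```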
|
107 |
-
<h3>Step 4: Create and Send Message Templates</h3>
|
108 |
-
<p>To create and send message templates, you will need to use the Twilio Console or the API. Message templates are pre-approved messages that you can send to customers for certain purposes, such as notifications, reminders, confirmations, or updates. These messages can be text-based, media-based, or interactive. You can also personalize them using placeholders.</p>
|
109 |
-
<p>To create message templates using the Twilio Console, go to [Twilio Console] and click on the "Programmable Messaging" section. Then, click on the "WhatsApp" section and select your number from the drop-down menu. Then, click on the "Templates" tab and click on the "Create Template" button. You will see a form where you can enter some information about your template, such as name, category, language, content, etc. You can also preview how your template will look like on different devices. Once you are done, click on the "Submit Template" button. Your template will be sent to WhatsApp for approval.</p>
|
110 |
-
<p>To create message templates using the API, you will need to use the [WhatsApp Template API]. This is a RESTful API that allows you to create, update, delete, and retrieve message templates programmatically. You will need to provide some parameters in your request body, such as name, category, language, content_type (text or media), components (the elements of your template), etc. You will also need to provide your authentication credentials in your request header.</p>
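<p>As a rough illustration of what such a request can look like, here is a sketch in Python that posts a new template to Meta's WhatsApp Business Management API. The Graph API version, the account ID, the access token, and the template fields below are all assumptions made for this example; your solution provider may expose its own equivalent endpoint instead:</p>

```python
# pip install requests
import requests

# Placeholders: your WhatsApp Business Account ID and a long-lived access token.
WABA_ID = "123456789012345"
ACCESS_TOKEN = "your_access_token"

# A simple text-only template with two numbered placeholders in the body.
template = {
    "name": "order_update",        # lowercase letters, digits, and underscores
    "language": "en_US",
    "category": "UTILITY",         # other categories include MARKETING and AUTHENTICATION
    "components": [
        {"type": "BODY", "text": "Hi {{1}}, your order {{2}} has shipped."},
    ],
}

response = requests.post(
    f"https://graph.facebook.com/v17.0/{WABA_ID}/message_templates",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json=template,
)
print(response.status_code, response.json())  # the template then goes to WhatsApp for review
```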
|
111 |
-
<p>To send message templates using the Twilio Console or the API, you will need to use the same methods as sending regular messages (see Step 3). The only difference is that you will need to specify the template name and namespace in your request body or parameters. You will also need to provide any placeholders that are required by your template.</p>
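<p>With Twilio, one way to do this in code is to reference the approved template by its content SID and pass the placeholder values as JSON. This is a hedged sketch based on Twilio's content-template flow; the SIDs, phone numbers, and variable values are placeholders:</p>

```python
import json

from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")

# content_sid identifies an approved template in your account;
# content_variables fills in its numbered placeholders ({{1}}, {{2}}, ...).
message = client.messages.create(
    from_="whatsapp:+14155238886",                    # placeholder sender
    to="whatsapp:+15551234567",                       # placeholder recipient
    content_sid="HXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",  # placeholder template SID
    content_variables=json.dumps({"1": "Alice", "2": "ORDER-1234"}),
)
print(message.status)  # e.g. "queued" once Twilio accepts the request
```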
|
112 |
-
<h2>Conclusion</h2>
|
113 |
-
<p>In this article, we have shown you how to download WhatsApp Business in laptop using two different methods: WhatsApp Business app and WhatsApp Business platform. The app is suitable for small businesses who want to manage their customer communication directly from their phone or laptop. The platform is suitable for medium to large businesses who want to integrate WhatsApp with their existing systems and tools and communicate with customers programm atically through the WhatsApp Business API. Both methods have their own features and benefits, depending on your business needs and goals. We hope this article has helped you understand how to download WhatsApp Business in laptop and how to use it effectively for your business communication.</p>
|
114 |
-
<h2>FAQs</h2>
|
115 |
-
<p>Here are some frequently asked questions about WhatsApp Business:</p>
|
116 |
-
<h3>Q: Can I use WhatsApp Business and WhatsApp Messenger on the same phone or laptop?</h3>
|
117 |
-
<p>A: Yes, you can use both apps on the same device, as long as you use different phone numbers for each app. You can also link your WhatsApp Business account to your Facebook Page to sync your information and manage your messages from one place.</p>
|
118 |
-
<h3>Q: How can I get a verification badge for my WhatsApp Business account?</h3>
|
119 |
-
<p>A: A verification badge is a green checkmark that appears next to your business name on WhatsApp. It indicates that WhatsApp has confirmed that the phone number belongs to an authentic business. To get a verification badge, you need to use the WhatsApp Business platform and request it from WhatsApp. You will need to provide some documents and information to prove your identity and legitimacy as a business.</p>
|
120 |
-
<h3>Q: How can I send messages to customers who have not contacted me first?</h3>
|
121 |
-
<p>A: You can only send messages to customers who have initiated a conversation with you or who have given you their consent to receive messages from you. You can also send message templates that are pre-approved by WhatsApp for certain purposes, such as notifications, reminders, confirmations, or updates. These messages are charged per message and have a 24-hour window after the last customer interaction.</p>
|
122 |
-
<h3>Q: How can I comply with the data protection and privacy regulations when using WhatsApp Business?</h3>
|
123 |
-
<p>A: You are responsible for complying with the applicable data protection and privacy laws and regulations when using WhatsApp Business. This includes obtaining the consent of your customers to collect, store, and process their personal data, informing them about how you use their data and what rights they have, and implementing appropriate security measures to protect their data. You can also refer to the [WhatsApp Business Terms of Service] and [WhatsApp Business Privacy Policy] for more information.</p>
|
124 |
-
<h3>Q: How can I get help or support when using WhatsApp Business?</h3>
|
125 |
-
<p>A: You can get help or support when using WhatsApp Business by visiting the [WhatsApp Business Help Center] or contacting the [WhatsApp Business Support Team]. You can also contact your WhatsApp Business solution provider if you are using the platform version.</p>
|
126 |
-
<br />
|
127 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Enjoy Temple Run with Mod Features - Free Download for Android Devices.md
DELETED
@@ -1,141 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Free Download Temple Run Mod APK: How to Enjoy Unlimited Fun and Adventure</h1>
|
3 |
-
<p>Do you love running games that test your reflexes and skills? Do you want to experience the thrill of escaping from ancient temples, dodging obstacles, and collecting treasures? If yes, then you should try Temple Run, one of the most popular and addictive games on Android. And if you want to make your gaming experience even more exciting, you should download Temple Run Mod APK, a modified version of the game that gives you unlimited money, gems, and other perks. In this article, we will tell you everything you need to know about Temple Run and Temple Run Mod APK, including how to download and install it, how to play it, and what are the advantages and disadvantages of using it. So, let's get started!</p>
|
4 |
-
<h2>free download temple run mod apk</h2><br /><p><b><b>Download File</b> - <a href="https://jinyurl.com/2uNNWn">https://jinyurl.com/2uNNWn</a></b></p><br /><br />
|
5 |
-
<h2>What is Temple Run?</h2>
|
6 |
-
<p>Temple Run is a 3D endless running game developed by Imangi Studios and released in 2011. It has been downloaded over a billion times on Google Play Store and has spawned several sequels and spin-offs. The game is inspired by the Indiana Jones movies and other adventure films, where the protagonist has to escape from a dangerous temple after stealing a cursed idol.</p>
|
7 |
-
<h3>The gameplay of Temple Run</h3>
|
8 |
-
<p>The gameplay of Temple Run is simple but challenging. You control your character by swiping on the screen to turn left or right, jump over gaps or obstacles, or slide under barriers. You also have to tilt your device to move sideways and avoid falling off the edges. You have to run as far as you can without hitting anything or getting caught by the evil monkeys that chase you. Along the way, you can collect coins, gems, power-ups, and other items that can help you boost your score and unlock new features.</p>
|
9 |
-
<h3>The features of Temple Run</h3>
|
10 |
-
<p>Temple Run has many features that make it fun and engaging. Some of them are:</p>
|
11 |
-
<ul>
|
12 |
-
<li>Seven different characters to choose from, each with their own abilities and personalities.</li>
|
13 |
-
<li>Various environments to explore, such as forests, mines, waterfalls, and bridges.</li>
|
14 |
-
<li>Different types of obstacles to avoid, such as fire, spikes, traps, and boulders.</li>
|
15 |
-
<li>Different types of power-ups to use, such as magnets, shields, speed boosters, and coin multipliers.</li>
|
16 |
-
<li>Achievements and leaderboards to compete with your friends and other players around the world.</li>
|
17 |
-
</ul>
|
18 |
-
<h2>What is Temple Run Mod APK?</h2>
|
19 |
-
<p>Temple Run Mod APK is a modified version of the original game that gives you some extra benefits that are not available in the official version. For example, with Temple Run Mod APK, you can get unlimited money and gems, which you can use to buy new characters, power-ups, and other items. You can also unlock all the features and levels without having to complete any tasks or challenges. Moreover, you can enjoy the game without any ads or interruptions.</p>
|
20 |
-
<h3>The benefits of Temple Run Mod APK</h3>
|
21 |
-
<p>Some of the benefits of using Temple Run Mod APK are:</p>
|
22 |
-
<ul>
|
23 |
-
<li>You can have more fun and excitement by playing with unlimited resources and options.</li>
|
24 |
-
<li>You can save your time and effort by not having to earn or spend money on anything.</li>
|
25 |
-
<li>You can customize your game according to your preferences and style.</li>
|
26 |
-
<li>You can explore all the aspects of the game without any limitations or restrictions.</li>
|
27 |
-
</ul>
|
28 |
-
<h3>The drawbacks of Temple Run Mod APK</h3> <p>Some of the drawbacks of using Temple Run Mod APK are:</p>
|
29 |
-
<ul>
|
30 |
-
<li>You may face some technical issues or errors while downloading or installing the modded version of the game.</li>
|
31 |
-
<li>You may risk losing your progress or data if the modded version is not compatible with your device or the official version.</li>
|
32 |
-
<li>You may violate the terms and conditions of the game developers and get banned from playing the game online or accessing its features.</li>
|
33 |
-
<li>You may lose the challenge and satisfaction of playing the game as it is meant to be played.</li>
|
34 |
-
</ul>
|
35 |
-
<h2>How to download and install Temple Run Mod APK?</h2>
|
36 |
-
<p>If you want to download and install Temple Run Mod APK, you need to follow some simple steps. However, before you do that, you need to take some precautions to ensure that you are downloading a safe and reliable version of the modded game. Here are some tips to help you:</p>
|
37 |
-
<h3>The precautions to take before downloading and installing Temple Run Mod APK</h3>
|
38 |
-
<ul>
|
39 |
-
<li>Make sure that your device has enough storage space and battery life to download and install the modded game.</li>
|
40 |
-
<li>Make sure that your device is compatible with the modded game and meets its minimum requirements.</li>
|
41 |
-
<li>Make sure that you have a stable and fast internet connection to download the modded game without any interruptions.</li>
|
42 |
-
<li>Make sure that you have a backup of your original game data and progress in case something goes wrong with the modded game.</li>
|
43 |
-
<li>Make sure that you download the modded game from a trusted and verified source that does not contain any viruses, malware, or spyware.</li>
|
44 |
-
</ul>
|
45 |
-
<h3>The steps to download and install Temple Run Mod APK</h3>
|
46 |
-
<ol>
|
47 |
-
<li>Go to the website where you can find the link to download Temple Run Mod APK. You can search for it on Google or use one of these links: .</li>
|
48 |
-
<li>Click on the download button and wait for the file to be downloaded on your device. The file size may vary depending on the version of the modded game.</li>
|
49 |
-
<li>Once the file is downloaded, go to your device settings and enable the option to install apps from unknown sources. This will allow you to install the modded game without any issues.</li>
|
50 |
-
<li>Locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to be completed.</li>
|
51 |
-
<li>After the installation is done, launch the modded game and enjoy playing it with unlimited money, gems, and other perks.</li>
|
52 |
-
</ol>
|
53 |
-
<h2>How to play Temple Run Mod APK?</h2>
|
54 |
-
<p>Playing Temple Run Mod APK is not much different from playing the original game. You still have to run as far as you can, avoid obstacles, collect coins, gems, power-ups, and other items, and escape from the evil monkeys. However, with Temple Run Mod APK, you can have more fun and adventure by using some tips and tricks that can enhance your gaming experience. Here are some of them:</p>
|
55 |
-
<h3>The tips and tricks to play Temple Run Mod APK</h3>
|
56 |
-
<ul>
|
57 |
-
<li>Use your unlimited money and gems to buy new characters, power-ups, and other items that can help you run faster, longer, and safer.</li>
|
58 |
-
<li>Use your unlimited power-ups to activate special abilities that can give you an edge over the obstacles and enemies. For example, use the magnet to attract all the coins and gems in your path, use the shield to protect yourself from collisions, use the speed booster to run faster than ever, and use the coin multiplier to increase your score.</li>
|
59 |
-
<li>Use your unlimited gems to revive yourself whenever you die or get caught by the monkeys. This way, you can continue your run without losing your progress or score.</li>
|
60 |
-
<li>Use your unlocked features and levels to explore different environments and scenarios that can challenge your skills and reflexes. For example, try running in the dark forest, in the snowy mountains, in the fiery volcano, or in the haunted castle.</li>
|
61 |
-
</ul>
|
62 |
-
<h3>The best characters and power-ups to use in Temple Run Mod APK</h3>
|
63 |
-
<p>Temple Run Mod APK offers you a variety of characters and power-ups that can make your game more interesting and enjoyable. However, some of them are better than others depending on your preferences and goals. Here are some of the best characters and power-ups that we recommend you to use in Temple Run Mod APK:</p>
|
64 |
-
<p>free download temple run mod apk unlimited coins and gems<br />
|
65 |
-
free download temple run mod apk latest version for android<br />
|
66 |
-
free download temple run mod apk hack with unlimited money<br />
|
67 |
-
free download temple run mod apk offline without internet<br />
|
68 |
-
free download temple run mod apk all characters unlocked<br />
|
69 |
-
free download temple run mod apk 1.23.2 from happymod.com<br />
|
70 |
-
free download temple run mod apk with unlimited lives and boosters<br />
|
71 |
-
free download temple run mod apk for pc windows 10<br />
|
72 |
-
free download temple run mod apk no root required<br />
|
73 |
-
free download temple run mod apk original file from imangi studios<br />
|
74 |
-
free download temple run mod apk old version 1.6.4<br />
|
75 |
-
free download temple run mod apk with unlimited diamonds and keys<br />
|
76 |
-
free download temple run mod apk full version for android<br />
|
77 |
-
free download temple run mod apk new update 2023<br />
|
78 |
-
free download temple run mod apk without ads and pop-ups<br />
|
79 |
-
free download temple run mod apk with mega mod features<br />
|
80 |
-
free download temple run mod apk high graphics and sound quality<br />
|
81 |
-
free download temple run mod apk easy installation and gameplay<br />
|
82 |
-
free download temple run mod apk safe and secure from viruses<br />
|
83 |
-
free download temple run mod apk with unlimited power-ups and abilities<br />
|
84 |
-
free download temple run mod apk for ios iphone and ipad<br />
|
85 |
-
free download temple run mod apk with all maps and levels unlocked<br />
|
86 |
-
free download temple run mod apk with cheat codes and tricks<br />
|
87 |
-
free download temple run mod apk with unlimited speed and score multiplier<br />
|
88 |
-
free download temple run mod apk with all skins and outfits unlocked<br />
|
89 |
-
free download temple run mod apk with unlimited coins and gems 2023<br />
|
90 |
-
free download temple run mod apk latest version for android 11<br />
|
91 |
-
free download temple run mod apk hack with unlimited money 2023<br />
|
92 |
-
free download temple run mod apk offline without internet 2023<br />
|
93 |
-
free download temple run mod apk all characters unlocked 2023<br />
|
94 |
-
free download temple run mod apk 1.23.2 from happymod.com 2023<br />
|
95 |
-
free download temple run mod apk with unlimited lives and boosters 2023<br />
|
96 |
-
free download temple run mod apk for pc windows 11<br />
|
97 |
-
free download temple run mod apk no root required 2023<br />
|
98 |
-
free download temple run mod apk original file from imangi studios 2023<br />
|
99 |
-
free download temple run mod apk old version 1.6.4 2023<br />
|
100 |
-
free download temple run mod apk with unlimited diamonds and keys 2023<br />
|
101 |
-
free download temple run mod apk full version for android 11<br />
|
102 |
-
free download temple run mod apk new update 2024<br />
|
103 |
-
free download temple run mod apk without ads and pop-ups 2023<br />
|
104 |
-
free download temple run mod apk with mega mod features 2023<br />
|
105 |
-
free download temple run mod apk high graphics and sound quality 2023<br />
|
106 |
-
free download temple run mod apk easy installation and gameplay 2023<br />
|
107 |
-
free download temple run mod apk safe and secure from viruses 2023<br />
|
108 |
-
free download temple run mod apk with unlimited power-ups and abilities 2023</p>
<table border="1">
<tr><th>Character</th><th>Power-up</th><th>Reason</th></tr>
<tr><td>Guy Dangerous</td><td>Coin Magnet</td><td>This is the default character and power-up that you start with in Temple Run Mod APK. It is a good combination that can help you collect more coins and gems without having to worry about missing any.</td></tr>
<tr><td>Scarlett Fox</td><td>Speed Booster</td><td>This is one of the fastest characters in the game and she can run even faster with the speed booster power-up. This can help you cover more distance and score more points in less time. However, you need to be careful not to crash into anything or fall off the edges.</td></tr>
<tr><td>Barry Bones</td><td>Shield</td><td>This is one of the toughest characters in the game and he can withstand more damage with the shield power-up. This can help you survive longer and avoid getting caught by the monkeys. However, you still need to avoid the obstacles that can make you lose your shield or your life.</td></tr>
<tr><td>Karma Lee</td><td>Coin Multiplier</td><td>This is one of the most expensive characters in the game and she can multiply your coins by 2x, 3x, or 4x with the coin multiplier power-up. This can help you increase your score and buy more items in the game. However, you need to have enough gems to activate this power-up.</td></tr>
</table>
<h2>Conclusion</h2>
<p>Temple Run Mod APK is a great way to enjoy unlimited fun and adventure in one of the most popular and addictive running games on Android. With Temple Run Mod APK, you can get unlimited money, gems, power-ups, and other perks that can make your game more exciting and enjoyable. You can also unlock all the features and levels without having to complete any tasks or challenges. Moreover, you can play the game without any ads or interruptions.</p>
<p>However, Temple Run Mod APK also has some drawbacks that you need to be aware of before downloading and installing it. You may face some technical issues or errors while using the modded version of the game. You may also risk losing your progress or data if the modded version is not compatible with your device or the official version. You may also violate the terms and conditions of the game developers and get banned from playing the game online or accessing its features. You may also lose the challenge and satisfaction of playing the game as it is meant to be played.</p>
<p>Therefore, we recommend you to use Temple Run Mod APK at your own risk and discretion. We do not endorse or support any illegal or unethical activities related to the game. We only provide information and guidance for educational and entertainment purposes only.</p>
<p>If you want to download and install Temple Run Mod APK, you can follow the steps and precautions that we have mentioned in this article. You can also use the tips and tricks that we have shared to play Temple Run Mod APK better and have more fun. We hope that you have found this article helpful and informative. Thank you for reading!</p>
<h3>FAQs</h3>
<ul>
<li>Q: Is Temple Run Mod APK safe to use?</li>
<li>A: Temple Run Mod APK is not an official version of the game and it may contain some viruses, malware, or spyware that can harm your device or data. Therefore, it is not completely safe to use. You should always download it from a trusted and verified source and scan it with an antivirus before installing it.</li>
<li>Q: Is Temple Run Mod APK legal to use?</li>
<li>A: Temple Run Mod APK is not legal to use as it violates the terms and conditions of the game developers and infringes their intellectual property rights. Therefore, it is illegal to use. You may face legal consequences if you are caught using it.</li>
<li>Q: How can I update Temple Run Mod APK?</li>
<li>A: Temple Run Mod APK may not be compatible with the latest version of the official game and it may not receive any updates from the modders. Therefore, you may not be able to update it easily. You may have to uninstall it and download a new version of it from a different source.</li>
<li>Q: How can I uninstall Temple Run Mod APK?</li>
<li>A: You can uninstall Temple Run Mod APK by following these steps:</li>
<ol>
<li>Go to your device settings and find the option to manage your apps.</li>
<li>Find Temple Run Mod APK in the list of your apps and tap on it.</li>
<li>Select the option to uninstall it and confirm your choice.</li>
<li>Wait for the process to be completed and then restart your device.</li>
</ol>
<li>Q: How can I contact the developers of Temple Run Mod APK?</li>
<li>A: We do not know who are the developers of Temple Run Mod APK as they are not affiliated with Imangi Studios, the original developers of Temple Run. Therefore, we cannot provide you with their contact details or support information. You may have to search for them online or on social media platforms if you want to contact them.</li>
</ul></p> 197e85843d<br />
<br />
<br />
spaces/1yukikaze/img-to-music/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Img To Music
emoji: 🌅🎶
colorFrom: green
colorTo: purple
sdk: gradio
sdk_version: 3.20.0
app_file: app.py
pinned: true
duplicated_from: fffiloni/img-to-music
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/232labs/VToonify/vtoonify/model/raft/core/utils/utils.py
DELETED
@@ -1,82 +0,0 @@
import torch
import torch.nn.functional as F
import numpy as np
from scipy import interpolate


class InputPadder:
    """ Pads images such that dimensions are divisible by 8 """
    def __init__(self, dims, mode='sintel'):
        self.ht, self.wd = dims[-2:]
        pad_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8
        pad_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8
        if mode == 'sintel':
            self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2]
        else:
            self._pad = [pad_wd//2, pad_wd - pad_wd//2, 0, pad_ht]

    def pad(self, *inputs):
        return [F.pad(x, self._pad, mode='replicate') for x in inputs]

    def unpad(self, x):
        ht, wd = x.shape[-2:]
        c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]]
        return x[..., c[0]:c[1], c[2]:c[3]]

def forward_interpolate(flow):
    flow = flow.detach().cpu().numpy()
    dx, dy = flow[0], flow[1]

    ht, wd = dx.shape
    x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht))

    x1 = x0 + dx
    y1 = y0 + dy

    x1 = x1.reshape(-1)
    y1 = y1.reshape(-1)
    dx = dx.reshape(-1)
    dy = dy.reshape(-1)

    valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht)
    x1 = x1[valid]
    y1 = y1[valid]
    dx = dx[valid]
    dy = dy[valid]

    flow_x = interpolate.griddata(
        (x1, y1), dx, (x0, y0), method='nearest', fill_value=0)

    flow_y = interpolate.griddata(
        (x1, y1), dy, (x0, y0), method='nearest', fill_value=0)

    flow = np.stack([flow_x, flow_y], axis=0)
    return torch.from_numpy(flow).float()


def bilinear_sampler(img, coords, mode='bilinear', mask=False):
    """ Wrapper for grid_sample, uses pixel coordinates """
    H, W = img.shape[-2:]
    xgrid, ygrid = coords.split([1, 1], dim=-1)
    xgrid = 2*xgrid/(W-1) - 1
    ygrid = 2*ygrid/(H-1) - 1

    grid = torch.cat([xgrid, ygrid], dim=-1)
    img = F.grid_sample(img, grid, align_corners=True)

    if mask:
        mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
        return img, mask.float()

    return img


def coords_grid(batch, ht, wd, device):
    coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device))
    coords = torch.stack(coords[::-1], dim=0).float()
    return coords[None].repeat(batch, 1, 1, 1)


def upflow8(flow, mode='bilinear'):
    new_size = (8 * flow.shape[2], 8 * flow.shape[3])
    return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
spaces/52Hz/HWMNet_lowlight_enhancement/app.py
DELETED
@@ -1,39 +0,0 @@
import os
import gradio as gr
from PIL import Image


os.system('wget https://github.com/FanChiMao/HWMNet/releases/download/v0.0/LOL_enhancement_HWMNet.pth -P experiments/pretrained_models')
os.system('wget https://github.com/FanChiMao/HWMNet/releases/download/v0.0/MIT5K_enhancement_HWMNet.pth -P experiments/pretrained_models')

def inference(img, model):
    os.system('mkdir test')
    #basewidth = 256
    #wpercent = (basewidth / float(img.size[0]))
    #hsize = int((float(img.size[1]) * float(wpercent)))
    #img = img.resize((basewidth, hsize), Image.ANTIALIAS)
    img.save("test/1.png", "PNG")
    if model == 'LOL':
        os.system('python main_test_HWMNet.py --input_dir test --weights experiments/pretrained_models/LOL_enhancement_HWMNet.pth')
    elif model == 'MIT-5K':
        os.system('python main_test_HWMNet.py --input_dir test --weights experiments/pretrained_models/MIT5K_enhancement_HWMNet.pth')

    return 'result/1.png'


title = "Half Wavelet Attention on M-Net+ for Low-light Image Enhancement"
description = "Gradio demo for HWMNet. HWMNet has competitive performance results on two real-world low-light datasets in terms of quantitative metrics and visual quality. See the paper and project page for detailed results below. Here, we provide a demo for low-light image enhancement. To use it, simply upload your image, or click one of the examples to load them. We present 2 pretrained models, which is trained on LOL and MIT-Adobe FiveK dataset, respectively. The images in LOL dataset are darker than MIT-Adobe FiveK, so if you have the extremely dark images you could consider it. On the contrary, the MIT-Adobe FiveK's model is suitable for minor adjustment of the images' hue."
article = "<p style='text-align: center'><a href='https://ieeexplore.ieee.org/document/9897503' target='_blank'>Half Wavelet Attention on M-Net+ for Low-light Image Enhancement</a> | <a href='https://github.com/FanChiMao/HWMNet' target='_blank'>Github Repo</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=52Hz_HWMNet_lowlight_enhancement' alt='visitor badge'></center>"

examples = [['low-light.png', 'LOL'], ['low-light_2.png', 'MIT-5K']]
gr.Interface(
    inference,
    [gr.inputs.Image(type="pil", label="Input"), gr.inputs.Dropdown(choices=['LOL', 'MIT-5K'], type="value", default='LOL', label="model")],
    gr.outputs.Image(type="filepath", label="Output"),
    title=title,
    description=description,
    article=article,
    allow_flagging=False,
    allow_screenshot=False,
    examples=examples
).launch(debug=True)
spaces/801artistry/RVC801/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
DELETED
@@ -1,97 +0,0 @@
from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
import parselmouth
import numpy as np


class PMF0Predictor(F0Predictor):
    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.sampling_rate = sampling_rate

    def interpolate_f0(self, f0):
        """
        Interpolate the F0 sequence (fill in unvoiced frames).
        """

        data = np.reshape(f0, (f0.size, 1))

        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
        vuv_vector[data > 0.0] = 1.0
        vuv_vector[data <= 0.0] = 0.0

        ip_data = data

        frame_number = data.size
        last_value = 0.0
        for i in range(frame_number):
            if data[i] <= 0.0:
                j = i + 1
                for j in range(i + 1, frame_number):
                    if data[j] > 0.0:
                        break
                if j < frame_number - 1:
                    if last_value > 0.0:
                        step = (data[j] - data[i - 1]) / float(j - i)
                        for k in range(i, j):
                            ip_data[k] = data[i - 1] + step * (k - i + 1)
                    else:
                        for k in range(i, j):
                            ip_data[k] = data[j]
                else:
                    for k in range(i, frame_number):
                        ip_data[k] = last_value
            else:
                ip_data[i] = data[i]  # this copy may be unnecessary
                last_value = data[i]

        return ip_data[:, 0], vuv_vector[:, 0]

    def compute_f0(self, wav, p_len=None):
        x = wav
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        else:
            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(x, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        f0, uv = self.interpolate_f0(f0)
        return f0

    def compute_f0_uv(self, wav, p_len=None):
        x = wav
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        else:
            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(x, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        f0, uv = self.interpolate_f0(f0)
        return f0, uv
spaces/AI4PD/hexviz/hexviz/pages/3_📄Documentation.py
DELETED
@@ -1,89 +0,0 @@
import streamlit as st

from hexviz.config import URL

st.markdown(
    f"""
## Protein language models
There has been an explosion of capabilities in natural language processing
models in the last few years. These architectural advances from NLP have proven
to work very well for protein sequences, and we now have protein language models
(pLMs) that can generate novel functional proteins sequences
[ProtGPT2](https://www.nature.com/articles/s42256-022-00499-z) and auto-encoding
models that excel at capturing biophysical features of protein sequences
[ProtTrans](https://www.biorxiv.org/content/10.1101/2020.07.12.199554v3).

For an introduction to protein language models for protein design check out
[Controllable protein design with language
models](https://www.nature.com/articles/s42256-022-00499-z).

## Interpreting protein language models by visualizing attention patterns
With these impressive capabilities it is natural to ask what protein language
models are learning and how they work -- we want to **interpret** the models.
In natural language processing **attention analysis** has proven to be a useful
tool for interpreting transformer model internals see fex ([Abnar et al.
2020](https://arxiv.org/abs/2005.00928v2)). [BERTology meets
biology](https://arxiv.org/abs/2006.15222) provides a thorough introduction to
how we can analyze Transformer protein models through the lens of attention,
they show exciting findings such as:
> Attention: (1) captures the folding
> structure of proteins, connecting amino acids that are far apart in the
> underlying sequence, but spatially close in the three-dimensional structure, (2)
> targets binding sites, a key functional component of proteins, and (3) focuses
> on progressively more complex biophysical properties with increasing layer depth

Most existing tools for analyzing and visualizing attention patterns focus on
models trained on text ([BertViz](https://github.com/jessevig/bertviz),
[exBERT], [exBERT](https://exbert.net/)). It can be hard to analyze protein
sequences using these tools as we don't have any intuitive understand about the
protein language when looking at an amino acid sequence in the same way we do
for natural language. Experts studying proteins do have an understanding of
proteins, but it is mostly in in the context of a protein's structure, not its
sequence. Can we build a tool for analyzing attention patterns that can leverage
expert's knowledge of protein structure to understand what pLMs learn?

BERTology meets biology shows how visualizing attention patterns in the context
of protein structure can facilitate novel discoveries about what models learn.
[**Hexviz**](https://huggingface.co/spaces/aksell/hexviz) builds on this, and is
a tool to simplify analyzing attention patterns in the context of protein
structure. We hope this can enable domain experts to explore and interpret the
knowledge contained in pLMs.

## How to use Hexviz
There are three views:
1. <a href="{URL}Attention_Visualization" target="_self">🧬Attention Visualization</a> Shows attention weights from a single head as red bars between residues on a protein structure.
2. <a href="{URL}Identify_Interesting_Heads" target="_self">🗺️Identify Interesting Heads</a> Plots attention weights between residues as a heatmap for each head in the model.
3. <a href="{URL}Birds_Eye_View" target="_self">🦅Bird's Eye View</a> Attention on structures in a big grid over multiple heads and layers. The first view on steroids but with the cost of being quite slow for large models or long sequences.


The first view is the meat of the application and is where you can investigate
how attention patterns map onto the structure of a protein you're interested in.
Use the second view to narrow down to a few heads that you want to investigate
attention patterns from in detail. pLM are large and can have many heads, as an
example ProtBERT with it's 30 layers and 16 heads has 480 heads, so we need a
way to identify heads with patterns we're interested in.

The second view is a customizable heatmap plot of attention between residue for
all heads and layers in a model. From here it is possible to identify heads that
specialize in a particular attention pattern.

Read more about attention patterns in fex [Revealing the dark secrets of
BERT](https://arxiv.org/abs/1908.08593).

## Protein Language models in Hexviz
Hexviz currently supports the following models:
1. [ProtBERT](https://huggingface.co/Rostlab/prot_bert_bfd)
2. [ZymCTRL](https://huggingface.co/nferruz/ZymCTRL)
3. [TapeBert](https://github.com/songlab-cal/tape/blob/master/tape/models/modeling_bert.py) - a nickname coined in BERTology meets biology for the Bert Base model pre-trained on Pfam in [TAPE](https://www.biorxiv.org/content/10.1101/676825v1). TapeBert is used extensively in BERTOlogy meets biology.
4. [ProtT5 half](https://huggingface.co/Rostlab/prot_t5_xl_half_uniref50-enc)

## FAQ
1. I can't see any attention- "bars" in the visualization, what is wrong? -> Lower the `minimum attention`.
2. How are sequences I input folded? -> Using https://esmatlas.com/resources?action=fold
3. Why the name Hexviz? -> It's a discworld reference, Hex is a computer in the unseen universtiy which might be even less interpretable than transformer models:
> The main structure works through the movements of large numbers of ants through the complex pipes and tubing which make up the main quantity of Hex's infrastructure.
> Hex "thinks" by controlling which tubes the ants can crawl through, thus allowing it to perform increasingly complex computations if enough ants are provided (that is, if there are enough bugs in the system).
There's more fun reading, with earie reference to powerful AI models at https://discworld.fandom.com/wiki/Hex
""",
    unsafe_allow_html=True,
)
spaces/ASJMO/freegpt/client/js/chat.js
DELETED
@@ -1,508 +0,0 @@
const query = (obj) =>
  Object.keys(obj)
    .map((k) => encodeURIComponent(k) + "=" + encodeURIComponent(obj[k]))
    .join("&");
const url_prefix = document.querySelector("body").getAttribute("data-urlprefix");
const markdown = window.markdownit();
const message_box = document.getElementById(`messages`);
const message_input = document.getElementById(`message-input`);
const box_conversations = document.querySelector(`.top`);
const spinner = box_conversations.querySelector(".spinner");
const stop_generating = document.querySelector(`.stop-generating`);
const send_button = document.querySelector(`#send-button`);
const user_image = `<img src="${url_prefix}/assets/img/user.png" alt="User Avatar">`;
const gpt_image = `<img src="${url_prefix}/assets/img/gpt.png" alt="GPT Avatar">`;
let prompt_lock = false;

hljs.addPlugin(new CopyButtonPlugin());

message_input.addEventListener("blur", () => {
  window.scrollTo(0, 0);
});

message_input.addEventListener("focus", () => {
  document.documentElement.scrollTop = document.documentElement.scrollHeight;
});

const delete_conversations = async () => {
  localStorage.clear();
  await new_conversation();
};

const handle_ask = async () => {
  message_input.style.height = `80px`;
  window.scrollTo(0, 0);
  let message = message_input.value;

  if (message.length > 0) {
    message_input.value = ``;
    message_input.dispatchEvent(new Event("input"));
    await ask_gpt(message);
  }
};

const remove_cancel_button = async () => {
  stop_generating.classList.add(`stop-generating-hiding`);

  setTimeout(() => {
    stop_generating.classList.remove(`stop-generating-hiding`);
    stop_generating.classList.add(`stop-generating-hidden`);
  }, 300);
};

const ask_gpt = async (message) => {
  try {
    message_input.value = ``;
    message_input.innerHTML = ``;
    message_input.innerText = ``;

    add_conversation(window.conversation_id, message.substr(0, 16));
    window.scrollTo(0, 0);
    window.controller = new AbortController();

    jailbreak = document.getElementById("jailbreak");
    model = document.getElementById("model");
    prompt_lock = true;
    window.text = ``;
    window.token = message_id();

    stop_generating.classList.remove(`stop-generating-hidden`);

    add_user_message_box(message);

    message_box.scrollTop = message_box.scrollHeight;
    window.scrollTo(0, 0);
    await new Promise((r) => setTimeout(r, 500));
    window.scrollTo(0, 0);

    message_box.innerHTML += `
      <div class="message">
        <div class="avatar-container">
          ${gpt_image}
        </div>
        <div class="content" id="gpt_${window.token}">
          <div id="cursor"></div>
        </div>
      </div>
    `;

    message_box.scrollTop = message_box.scrollHeight;
    window.scrollTo(0, 0);
    await new Promise((r) => setTimeout(r, 1000));
    window.scrollTo(0, 0);

    const response = await fetch(`${url_prefix}/backend-api/v2/conversation`, {
      method: `POST`,
      signal: window.controller.signal,
      headers: {
        "content-type": `application/json`,
        accept: `text/event-stream`,
      },
      body: JSON.stringify({
        conversation_id: window.conversation_id,
        action: `_ask`,
        model: model.options[model.selectedIndex].value,
        jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
        meta: {
          id: window.token,
          content: {
            conversation: await get_conversation(window.conversation_id),
            internet_access: document.getElementById("switch").checked,
            content_type: "text",
            parts: [
              {
                content: message,
                role: "user",
              },
            ],
          },
        },
      }),
    });

    const reader = response.body.getReader();

    while (true) {
      const { value, done } = await reader.read();
      if (done) break;

      chunk = decodeUnicode(new TextDecoder().decode(value));

      if (
        chunk.includes(`<form id="challenge-form" action="${url_prefix}/backend-api/v2/conversation?`)
      ) {
        chunk = `cloudflare token expired, please refresh the page.`;
      }

      text += chunk;

      document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text);
      document.querySelectorAll(`code`).forEach((el) => {
        hljs.highlightElement(el);
      });

      window.scrollTo(0, 0);
      message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
    }

    // if text contains :
    if (text.includes(`instead. Maintaining this website and API costs a lot of money`)) {
      document.getElementById(`gpt_${window.token}`).innerHTML =
        "An error occurred, please reload / refresh cache and try again.";
    }

    add_message(window.conversation_id, "user", message);
    add_message(window.conversation_id, "assistant", text);

    message_box.scrollTop = message_box.scrollHeight;
    await remove_cancel_button();
    prompt_lock = false;

    await load_conversations(20, 0);
    window.scrollTo(0, 0);
  } catch (e) {
    add_message(window.conversation_id, "user", message);

    message_box.scrollTop = message_box.scrollHeight;
    await remove_cancel_button();
    prompt_lock = false;

    await load_conversations(20, 0);

    console.log(e);

    let cursorDiv = document.getElementById(`cursor`);
    if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);

    if (e.name != `AbortError`) {
      let error_message = `oops ! something went wrong, please try again / reload. [stacktrace in console]`;

      document.getElementById(`gpt_${window.token}`).innerHTML = error_message;
      add_message(window.conversation_id, "assistant", error_message);
    } else {
      document.getElementById(`gpt_${window.token}`).innerHTML += ` [aborted]`;
      add_message(window.conversation_id, "assistant", text + ` [aborted]`);
    }

    window.scrollTo(0, 0);
  }
};

const add_user_message_box = (message) => {
  const messageDiv = createElement("div", { classNames: ["message"] });
  const avatarContainer = createElement("div", { classNames: ["avatar-container"], innerHTML: user_image });
  const contentDiv = createElement("div", {
    classNames: ["content"],
    id: `user_${token}`,
    textContent: message,
  });

  messageDiv.append(avatarContainer, contentDiv);
  message_box.appendChild(messageDiv);
};

const decodeUnicode = (str) => {
  return str.replace(/\\u([a-fA-F0-9]{4})/g, function (match, grp) {
    return String.fromCharCode(parseInt(grp, 16));
  });
};

const clear_conversations = async () => {
  const elements = box_conversations.childNodes;
  let index = elements.length;

  if (index > 0) {
    while (index--) {
      const element = elements[index];
      if (element.nodeType === Node.ELEMENT_NODE && element.tagName.toLowerCase() !== `button`) {
        box_conversations.removeChild(element);
      }
    }
  }
};

const clear_conversation = async () => {
  let messages = message_box.getElementsByTagName(`div`);

  while (messages.length > 0) {
    message_box.removeChild(messages[0]);
  }
};

const delete_conversation = async (conversation_id) => {
  localStorage.removeItem(`conversation:${conversation_id}`);

  if (window.conversation_id == conversation_id) {
    await new_conversation();
  }

  await load_conversations(20, 0, true);
};

const set_conversation = async (conversation_id) => {
  history.pushState({}, null, `${url_prefix}/chat/${conversation_id}`);
  window.conversation_id = conversation_id;

  await clear_conversation();
  await load_conversation(conversation_id);
  await load_conversations(20, 0, true);
};

const new_conversation = async () => {
  history.pushState({}, null, `${url_prefix}/chat/`);
  window.conversation_id = uuid();

  await clear_conversation();
  await load_conversations(20, 0, true);
};

const load_conversation = async (conversation_id) => {
  let conversation = await JSON.parse(localStorage.getItem(`conversation:${conversation_id}`));
  console.log(conversation, conversation_id);

  for (item of conversation.items) {
    if (is_assistant(item.role)) {
      message_box.innerHTML += load_gpt_message_box(item.content);
    } else {
      message_box.innerHTML += load_user_message_box(item.content);
    }
  }

  document.querySelectorAll(`code`).forEach((el) => {
    hljs.highlightElement(el);
  });

  message_box.scrollTo({ top: message_box.scrollHeight, behavior: "smooth" });

  setTimeout(() => {
    message_box.scrollTop = message_box.scrollHeight;
  }, 500);
};

const load_user_message_box = (content) => {
  const messageDiv = createElement("div", { classNames: ["message"] });
  const avatarContainer = createElement("div", { classNames: ["avatar-container"], innerHTML: user_image });
  const contentDiv = createElement("div", { classNames: ["content"] });
  const preElement = document.createElement("pre");
  preElement.textContent = content;
  contentDiv.appendChild(preElement);

  messageDiv.append(avatarContainer, contentDiv);

  return messageDiv.outerHTML;
};

const load_gpt_message_box = (content) => {
  return `
    <div class="message">
      <div class="avatar-container">
        ${gpt_image}
      </div>
      <div class="content">
        ${markdown.render(content)}
      </div>
    </div>
  `;
};

const is_assistant = (role) => {
  return role == "assistant";
};

const get_conversation = async (conversation_id) => {
  let conversation = await JSON.parse(localStorage.getItem(`conversation:${conversation_id}`));
  return conversation.items;
};

const add_conversation = async (conversation_id, title) => {
  if (localStorage.getItem(`conversation:${conversation_id}`) == null) {
    localStorage.setItem(
      `conversation:${conversation_id}`,
      JSON.stringify({
        id: conversation_id,
        title: title,
        items: [],
      })
    );
  }
};

const add_message = async (conversation_id, role, content) => {
  before_adding = JSON.parse(localStorage.getItem(`conversation:${conversation_id}`));

  before_adding.items.push({
    role: role,
    content: content,
  });

  localStorage.setItem(`conversation:${conversation_id}`, JSON.stringify(before_adding)); // update conversation
};

const load_conversations = async (limit, offset, loader) => {
  //console.log(loader);
  //if (loader === undefined) box_conversations.appendChild(spinner);

  let conversations = [];
  for (let i = 0; i < localStorage.length; i++) {
    if (localStorage.key(i).startsWith("conversation:")) {
      let conversation = localStorage.getItem(localStorage.key(i));
      conversations.push(JSON.parse(conversation));
    }
  }

  //if (loader === undefined) spinner.parentNode.removeChild(spinner)
  await clear_conversations();

  for (conversation of conversations) {
    box_conversations.innerHTML += `
      <div class="conversation-sidebar">
        <div class="left" onclick="set_conversation('${conversation.id}')">
          <i class="fa-regular fa-comments"></i>
          <span class="conversation-title">${conversation.title}</span>
        </div>
        <i onclick="delete_conversation('${conversation.id}')" class="fa-regular fa-trash"></i>
      </div>
    `;
  }

  document.querySelectorAll(`code`).forEach((el) => {
    hljs.highlightElement(el);
  });
};

document.getElementById(`cancelButton`).addEventListener(`click`, async () => {
  window.controller.abort();
  console.log(`aborted ${window.conversation_id}`);
});

function h2a(str1) {
  var hex = str1.toString();
  var str = "";

  for (var n = 0; n < hex.length; n += 2) {
    str += String.fromCharCode(parseInt(hex.substr(n, 2), 16));
  }

  return str;
}

const uuid = () => {
  return `xxxxxxxx-xxxx-4xxx-yxxx-${Date.now().toString(16)}`.replace(/[xy]/g, function (c) {
    var r = (Math.random() * 16) | 0,
      v = c == "x" ? r : (r & 0x3) | 0x8;
    return v.toString(16);
  });
};

const message_id = () => {
  random_bytes = (Math.floor(Math.random() * 1338377565) + 2956589730).toString(2);
  unix = Math.floor(Date.now() / 1000).toString(2);

  return BigInt(`0b${unix}${random_bytes}`).toString();
};

window.onload = async () => {
  load_settings_localstorage();

  conversations = 0;
  for (let i = 0; i < localStorage.length; i++) {
    if (localStorage.key(i).startsWith("conversation:")) {
      conversations += 1;
    }
  }

  if (conversations == 0) localStorage.clear();

  await setTimeout(() => {
    load_conversations(20, 0);
  }, 1);

  if (!window.location.href.endsWith(`#`)) {
    if (/\/chat\/.+/.test(window.location.href.slice(url_prefix.length))) {
      await load_conversation(window.conversation_id);
    }
  }

  message_input.addEventListener("keydown", async (evt) => {
    if (prompt_lock) return;

    if (evt.key === "Enter" && !evt.shiftKey) {
      evt.preventDefault();
      await handle_ask();
    }
  });

  send_button.addEventListener("click", async (event) => {
    event.preventDefault();
    if (prompt_lock) return;
    message_input.blur();
    await handle_ask();
  });

  register_settings_localstorage();
};

const register_settings_localstorage = async () => {
  settings_ids = ["switch", "model", "jailbreak"];
  settings_elements = settings_ids.map((id) => document.getElementById(id));
  settings_elements.map((element) =>
    element.addEventListener(`change`, async (event) => {
      switch (event.target.type) {
        case "checkbox":
          localStorage.setItem(event.target.id, event.target.checked);
          break;
        case "select-one":
          localStorage.setItem(event.target.id, event.target.selectedIndex);
          break;
        default:
          console.warn("Unresolved element type");
      }
    })
  );
};

const load_settings_localstorage = async () => {
  settings_ids = ["switch", "model", "jailbreak"];
  settings_elements = settings_ids.map((id) => document.getElementById(id));
  settings_elements.map((element) => {
    if (localStorage.getItem(element.id)) {
      switch (element.type) {
        case "checkbox":
          element.checked = localStorage.getItem(element.id) === "true";
          break;
        case "select-one":
          element.selectedIndex = parseInt(localStorage.getItem(element.id));
          break;
        default:
          console.warn("Unresolved element type");
      }
    }
  });
};

function clearTextarea(textarea) {
  textarea.style.removeProperty("height");
  textarea.style.height = `${textarea.scrollHeight + 4}px`;
  if (textarea.value.trim() === "" && textarea.value.includes("\n")) {
    textarea.value = "";
  }
}

function createElement(tag, { classNames, id, innerHTML, textContent } = {}) {
  const el = document.createElement(tag);
  if (classNames) {
    el.classList.add(...classNames);
  }
  if (id) {
    el.id = id;
  }
  if (innerHTML) {
    el.innerHTML = innerHTML;
  }
  if (textContent) {
    const preElement = document.createElement("pre");
    preElement.textContent = textContent;
    el.appendChild(preElement);
  }
  return el;
}
spaces/Abhilashvj/haystack_QA/app.py
DELETED
@@ -1,341 +0,0 @@
import json
import logging
import os
import shutil
import sys
import uuid
from json import JSONDecodeError
from pathlib import Path
from typing import List, Optional

import pandas as pd
import pinecone
import streamlit as st
from annotated_text import annotation
from haystack import BaseComponent, Document
from haystack.document_stores import PineconeDocumentStore
from haystack.nodes import (
    DocxToTextConverter,
    EmbeddingRetriever,
    FARMReader,
    FileTypeClassifier,
    PDFToTextConverter,
    PreProcessor,
    TextConverter,
)
from haystack.pipelines import ExtractiveQAPipeline, Pipeline
from markdown import markdown
from sentence_transformers import SentenceTransformer


class PineconeSearch(BaseComponent):
    outgoing_edges = 1

    def run(self, query: str, top_k: Optional[int]):
        # process the inputs
        vector_embedding = emb_model.encode(query).tolist()
        response = index.query([vector_embedding], top_k=top_k, include_metadata=True)
        docs = [
            Document(
                content=d["metadata"]["text"],
                meta={
                    "title": d["metadata"]["filename"],
                    "context": d["metadata"]["text"],
                    "_split_id": d["metadata"]["_split_id"],
                },
            )
            for d in response["matches"]
        ]
        output = {"documents": docs, "query": query}
        return output, "output_1"

    def run_batch(self, queries: List[str], top_k: Optional[int]):

        return {}, "output_1"


# connect to pinecone environment
pinecone.init(api_key=st.secrets["pinecone_apikey"], environment="us-west1-gcp")
index_name = "qa-demo-fast-384"
# retriever_model = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
retriever_model = "sentence-transformers/multi-qa-MiniLM-L6-cos-v1"
emb_model = SentenceTransformer(retriever_model)

embedding_dim = 384
preprocessor = PreProcessor(
    clean_empty_lines=True,
    clean_whitespace=True,
    clean_header_footer=False,
    split_by="word",
    split_length=100,
    split_respect_sentence_boundary=True,
)
file_type_classifier = FileTypeClassifier()
text_converter = TextConverter()
pdf_converter = PDFToTextConverter()
docx_converter = DocxToTextConverter()

# check if the abstractive-question-answering index exists
if index_name not in pinecone.list_indexes():
    # delete the current index and create the new index if it does not exist
    for delete_index in pinecone.list_indexes():
        pinecone.delete_index(delete_index)
    pinecone.create_index(index_name, dimension=embedding_dim, metric="cosine")

# connect to abstractive-question-answering index we created
index = pinecone.Index(index_name)

FILE_UPLOAD_PATH = "./data/uploads/"
os.makedirs(FILE_UPLOAD_PATH, exist_ok=True)


def create_doc_store():
    document_store = PineconeDocumentStore(
        api_key=st.secrets["pinecone_apikey"],
        index=index_name,
        similarity="cosine",
        embedding_dim=embedding_dim,
    )
    return document_store


def query(pipe, question, top_k_reader, top_k_retriever):
    res = pipe.run(
        query=question,
        params={"Retriever": {"top_k": top_k_retriever}, "Reader": {"top_k": top_k_reader}},
    )
    return res


document_store = create_doc_store()
# pipe = create_pipe(document_store)

retriever = EmbeddingRetriever(
    document_store=document_store,
    embedding_model=retriever_model,
    model_format="sentence_transformers",
)
# load the retriever model from huggingface model hub
sentence_encoder = SentenceTransformer(retriever_model)

reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=False)
# pipe = ExtractiveQAPipeline(reader, retriever)
# Custom built extractive QA pipeline
pipe = Pipeline()
pipe.add_node(component=PineconeSearch(), name="Retriever", inputs=["Query"])
pipe.add_node(component=reader, name="Reader", inputs=["Retriever"])


indexing_pipeline_with_classification = Pipeline()
indexing_pipeline_with_classification.add_node(
    component=file_type_classifier, name="FileTypeClassifier", inputs=["File"]
)
indexing_pipeline_with_classification.add_node(
    component=text_converter, name="TextConverter", inputs=["FileTypeClassifier.output_1"]
)
indexing_pipeline_with_classification.add_node(
    component=pdf_converter, name="PdfConverter", inputs=["FileTypeClassifier.output_2"]
)
indexing_pipeline_with_classification.add_node(
    component=docx_converter, name="DocxConverter", inputs=["FileTypeClassifier.output_4"]
)
indexing_pipeline_with_classification.add_node(
    component=preprocessor,
    name="Preprocessor",
    inputs=["TextConverter", "PdfConverter", "DocxConverter"],
)


def set_state_if_absent(key, value):
    if key not in st.session_state:
        st.session_state[key] = value


# Adjust to a question that you would like users to see in the search bar when they load the UI:
DEFAULT_QUESTION_AT_STARTUP = os.getenv(
    "DEFAULT_QUESTION_AT_STARTUP", "My blog post discusses remote work. Give me statistics."
)
DEFAULT_ANSWER_AT_STARTUP = os.getenv(
    "DEFAULT_ANSWER_AT_STARTUP",
    "7% more remote workers have been at their current organization for 5 years or fewer",
)

# Sliders
DEFAULT_DOCS_FROM_RETRIEVER = int(os.getenv("DEFAULT_DOCS_FROM_RETRIEVER", "3"))
DEFAULT_NUMBER_OF_ANSWERS = int(os.getenv("DEFAULT_NUMBER_OF_ANSWERS", "3"))


st.set_page_config(
    page_title="Haystack Demo", page_icon="https://haystack.deepset.ai/img/HaystackIcon.png"
)

# Persistent state
set_state_if_absent("question", DEFAULT_QUESTION_AT_STARTUP)
set_state_if_absent("answer", DEFAULT_ANSWER_AT_STARTUP)
set_state_if_absent("results", None)


# Small callback to reset the interface in case the text of the question changes
def reset_results(*args):
    st.session_state.answer = None
    st.session_state.results = None
    st.session_state.raw_json = None


# Title
st.write("# Haystack Search Demo")
st.markdown(
    """
This demo takes its data from two sample data csv with statistics on various topics. \n
Ask any question on this topic and see if Haystack can find the correct answer to your query! \n
*Note: do not use keywords, but full-fledged questions.* The demo is not optimized to deal with keyword queries and might misunderstand you.
""",
    unsafe_allow_html=True,
)

# Sidebar
st.sidebar.header("Options")
st.sidebar.write("## File Upload:")
data_files = st.sidebar.file_uploader(
    "upload", type=["pdf", "txt", "docx"], accept_multiple_files=True, label_visibility="hidden"
)
ALL_FILES = []
META_DATA = []
for data_file in data_files:
    # Upload file
    if data_file:
        file_path = Path(FILE_UPLOAD_PATH) / f"{uuid.uuid4().hex}_{data_file.name}"
        with open(file_path, "wb") as f:
            f.write(data_file.getbuffer())
        ALL_FILES.append(file_path)
        st.sidebar.write(str(data_file.name) + " &nbsp;&nbsp; ✅ ")
        META_DATA.append({"filename": data_file.name})

data_files = []


if len(ALL_FILES) > 0:
    # document_store.update_embeddings(retriever, update_existing_embeddings=False)
    docs = indexing_pipeline_with_classification.run(file_paths=ALL_FILES, meta=META_DATA)[
        "documents"
    ]
    index_name = "qa_demo"
    # we will use batches of 64
    batch_size = 128
    # docs = docs['documents']
    # with st.spinner(
    #     "🧠 Performing indexing of uplaoded documents... \n "
    # ):
    my_bar = st.progress(0)
    upload_count = 0
    for i in range(0, len(docs), batch_size):
        # find end of batch
        i_end = min(i + batch_size, len(docs))
        # extract batch
        batch = [doc.content for doc in docs[i:i_end]]
        # generate embeddings for batch
        emb = sentence_encoder.encode(batch).tolist()
        # get metadata
        # meta = [doc.meta for doc in docs[i:i_end]]
        meta = []
        for doc in docs[i:i_end]:
            meta_dict = doc.meta
            meta_dict["text"] = doc.content
            meta.append(meta_dict)
        # create unique IDs
        ids = [doc.id for doc in docs[i:i_end]]
        # add all to upsert list
        to_upsert = list(zip(ids, emb, meta))
        # upsert/insert these records to pinecone
        _ = index.upsert(vectors=to_upsert)
        upload_count += batch_size
        upload_percentage = min(int((upload_count / len(docs)) * 100), 100)
        my_bar.progress(upload_percentage)

top_k_reader = st.sidebar.slider(
    "Max. number of answers",
    min_value=1,
    max_value=10,
    value=DEFAULT_NUMBER_OF_ANSWERS,
    step=1,
    on_change=reset_results,
)
top_k_retriever = st.sidebar.slider(
    "Max. number of documents from retriever",
    min_value=1,
    max_value=10,
    value=DEFAULT_DOCS_FROM_RETRIEVER,
    step=1,
    on_change=reset_results,
)
# data_files = st.file_uploader(
#     "upload", type=["csv"], accept_multiple_files=True, label_visibility="hidden"
# )
# for data_file in data_files:
#     # Upload file
#     if data_file:
#         raw_json = upload_doc(data_file)

question = st.text_input(
    value=st.session_state.question,
    max_chars=100,
    on_change=reset_results,
    label="question",
    label_visibility="hidden",
)
col1, col2 = st.columns(2)
col1.markdown("<style>.stButton button {width:100%;}</style>", unsafe_allow_html=True)
col2.markdown("<style>.stButton button {width:100%;}</style>", unsafe_allow_html=True)

# Run button
run_pressed = col1.button("Run")
if run_pressed:

    run_query = run_pressed or question != st.session_state.question
    # Get results for query
    if run_query and question:
        reset_results()
        st.session_state.question = question

        with st.spinner("🧠 Performing neural search on documents... \n "):
            try:
                st.session_state.results = query(
                    pipe, question, top_k_reader=top_k_reader, top_k_retriever=top_k_retriever
                )
            except JSONDecodeError as je:
                st.error(
                    "👓 An error occurred reading the results. Is the document store working?"
                )
            except Exception as e:
                logging.exception(e)
                if "The server is busy processing requests" in str(e) or "503" in str(e):
                    st.error("🧑‍🌾 All our workers are busy! Try again later.")
                else:
                    st.error(f"🐞 An error occurred during the request. {str(e)}")


if st.session_state.results:

    st.write("## Results:")

    for count, result in enumerate(st.session_state.results["answers"]):
        answer, context = result.answer, result.context
        start_idx = context.find(answer)
        end_idx = start_idx + len(answer)
        # Hack due to this bug: https://github.com/streamlit/streamlit/issues/3190
        try:
            filename = result.meta["title"]
            st.write(
                markdown(
                    f'From file: {filename} \n {context[:start_idx] } {str(annotation(answer, "ANSWER", "#8ef"))} {context[end_idx:]} \n '
                ),
                unsafe_allow_html=True,
            )
        except:
            filename = result.meta.get("filename", "")
            st.write(
                markdown(
                    f'From file: {filename} \n {context[:start_idx] } {str(annotation(answer, "ANSWER", "#8ef"))} {context[end_idx:]} \n '
                ),
                unsafe_allow_html=True,
            )
spaces/AchyuthGamer/MagicPrompt-Stable-Diffusion/style.css
DELETED
@@ -1,84 +0,0 @@
#col-container {
    max-width: 800px;
    margin-left: auto;
    margin-right: auto;
}
a {
    color: inherit;
    text-decoration: underline;
}
.gradio-container {
    font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
    color: white;
    border-color: #9d66e5;
    background: #9d66e5;
}
input[type='range'] {
    accent-color: #9d66e5;
}
.dark input[type='range'] {
    accent-color: #dfdfdf;
}
.container {
    max-width: 800px;
    margin: auto;
    padding-top: 1.5rem;
}
#gallery {
    min-height: 22rem;
    margin-bottom: 15px;
    margin-left: auto;
    margin-right: auto;
    border-bottom-right-radius: .5rem !important;
    border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
    min-height: 20rem;
}
.details:hover {
    text-decoration: underline;
}
.gr-button {
    white-space: nowrap;
}
.gr-button:focus {
    border-color: rgb(147 197 253 / var(--tw-border-opacity));
    outline: none;
    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
    --tw-border-opacity: 1;
    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
    --tw-ring-opacity: .5;
}
#advanced-options {
    margin-bottom: 20px;
}
.footer {
    margin-bottom: 45px;
    margin-top: 35px;
    text-align: center;
    border-bottom: 1px solid #e5e5e5;
}
.footer>p {
    font-size: .8rem;
    display: inline-block;
    padding: 0 10px;
    transform: translateY(10px);
    background: white;
}
.dark .logo{ filter: invert(1); }
.dark .footer {
    border-color: #303030;
}
.dark .footer>p {
    background: #0b0f19;
}
.acknowledgments h4{
    margin: 1.25em 0 .25em 0;
    font-weight: bold;
    font-size: 115%;
}

spaces/AchyuthGamer/OpenGPT/g4f/Provider/__init__.py
DELETED
@@ -1,100 +0,0 @@
from __future__ import annotations
from .Acytoo import Acytoo
from .AiAsk import AiAsk
from .Aibn import Aibn
from .Aichat import Aichat
from .Ails import Ails
from .Aivvm import Aivvm
from .AItianhu import AItianhu
from .AItianhuSpace import AItianhuSpace
from .Bing import Bing
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptAi import ChatgptAi
from .ChatgptDemo import ChatgptDemo
from .ChatgptDuo import ChatgptDuo
from .ChatgptX import ChatgptX
from .Cromicle import Cromicle
from .DeepAi import DeepAi
from .FreeGpt import FreeGpt
from .GPTalk import GPTalk
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
from .H2o import H2o
from .Liaobots import Liaobots
from .Myshell import Myshell
from .Phind import Phind
from .Vercel import Vercel
from .Vitalentum import Vitalentum
from .Ylokh import Ylokh
from .You import You
from .Yqcloud import Yqcloud

from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
from .retry_provider import RetryProvider
from .deprecated import *
from .needs_auth import *
from .unfinished import *

__all__ = [
    'BaseProvider',
    'AsyncProvider',
    'AsyncGeneratorProvider',
    'RetryProvider',
    'Acytoo',
    'AiAsk',
    'Aibn',
    'Aichat',
    'Ails',
    'Aivvm',
    'AiService',
    'AItianhu',
    'AItianhuSpace',
    'Aivvm',
    'Bard',
    'Bing',
    'ChatBase',
    'ChatForAi',
    'Chatgpt4Online',
    'ChatgptAi',
    'ChatgptDemo',
    'ChatgptDuo',
    'ChatgptLogin',
    'ChatgptX',
    'Cromicle',
    'CodeLinkAva',
    'DeepAi',
    'DfeHub',
    'EasyChat',
    'Forefront',
    'FreeGpt',
    'GPTalk',
    'GptForLove',
    'GetGpt',
    'GptGo',
    'GptGod',
    'H2o',
    'HuggingChat',
    'Liaobots',
    'Lockchat',
    'Myshell',
    'Opchatgpts',
    'Raycast',
    'OpenaiChat',
    'OpenAssistant',
    'PerplexityAi',
    'Phind',
    'Theb',
    'Vercel',
    'Vitalentum',
    'Wewordle',
    'Ylokh',
    'You',
    'Yqcloud',
    'Equing',
    'FastGpt',
    'Wuguokai',
    'V50'
]

spaces/AdVisual/MaskCut/config.py
DELETED
@@ -1,60 +0,0 @@

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import torch


# Config that serves all environment
GLOBAL_CONFIG = {
    "USE_CUDE_IF_AVAILABLE": True,
    "ROUND_DIGIT": 6
}

# Environment specific config, or overwrite of GLOBAL_CONFIG
ENV_CONFIG = {
    "development": {
        "DEBUG": True
    },

    "staging": {
        "DEBUG": True
    },

    "production": {
        "DEBUG": False,
        "ROUND_DIGIT": 3
    }
}


def get_config() -> dict:
    """
    Get config based on running environment
    :return: dict of config
    """

    # Determine running environment
    ENV = os.environ['PYTHON_ENV'] if 'PYTHON_ENV' in os.environ else 'development'
    ENV = ENV or 'development'

    # raise error if environment is not expected
    if ENV not in ENV_CONFIG:
        raise EnvironmentError(f'Config for envirnoment {ENV} not found')

    config = GLOBAL_CONFIG.copy()
    config.update(ENV_CONFIG[ENV])

    config['ENV'] = ENV
    config['DEVICE'] = 'cuda' if torch.cuda.is_available() and config['USE_CUDE_IF_AVAILABLE'] else 'cpu'

    return config

# load config for import
CONFIG = get_config()

if __name__ == '__main__':
    # for debugging
    import json
    print(json.dumps(CONFIG, indent=4))

spaces/Afnaan/chatbots/app.py
DELETED
@@ -1,43 +0,0 @@
import openai
import gradio

openai.api_key = "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"

messages = [
    {"role": "system", "content": "You are a top psychologist and respond in a professional way",
     "role": "user", "content": "you will give me personalized suggestions to improve my mental health"
     }]


def CustomChatGPT(type):
    messages.append({"role": "user", "content": type})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages
    )
    ChatGPT_reply = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": ChatGPT_reply})
    return ChatGPT_reply


demo = gradio.Interface(fn=CustomChatGPT, inputs="text",
                        outputs="text",
                        examples=[["i have depression what should i do"], [
                            "i am having work stress"], ["how to cope with anger issues?"]],

                        allow_flagging="never",
                        description="""Introducing a revolutionary new AI chatbot, designed to help you find your way through life's challenges. Developed by computer science student, Afnan, this chatbot uses the latest artificial intelligence technology to provide personalized counseling and self-help solutions.

Using the power of natural language processing and machine learning, Afnan's chatbot can engage in meaningful conversations with you, listening to your concerns and providing advice tailored to your unique situation. Whether you're struggling with anxiety, depression, or relationship issues, the chatbot is here to help.

With an intuitive interface created using Gradio, it's easy to use this chatbot from anywhere with an internet connection. Simply input your concerns, and let the chatbot do the rest. You'll be amazed at the insights and guidance it can provide.

So why wait? Whether you're looking for someone to talk to about your problems or just need some advice on how to handle a tough situation, Afnan's chatbot is here to help you. Sign up today and experience the transformative power of AI counseling and self-help.
Contact: [email protected]
""",

                        title="Free Mental Health Counselling Chatbot by Afnan",
                        theme="dark")


demo.launch()

spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/decision_maker/base.py
DELETED
@@ -1,64 +0,0 @@
from __future__ import annotations

from typing import TYPE_CHECKING, List, Tuple

from agentverse.agents import BaseAgent

from pydantic import BaseModel

from abc import abstractmethod
from agentverse.message import SolverMessage
from . import decision_maker_registry


class BaseDecisionMaker(BaseModel):
    """
    The base class of decision making class.
    """

    name: str = "base"

    @abstractmethod
    async def astep(
        self,
        agents: List[BaseAgent],
        task_description: str,
        previous_plan: str = "No solution yet.",
        advice: str = "No advice yet.",
        *args,
        **kwargs,
    ) -> List[SolverMessage]:
        pass

    def reset(self):
        pass

    def broadcast_messages(self, agents, messages) -> None:
        for agent in agents:
            agent.add_message_to_memory(messages)

    def p2p_messages(self, agents, messages) -> None:
        agents[0].add_message_to_memory(messages)
        for message in messages:
            for agent in agents[1:]:
                if agent.name == message.sender:
                    agent.add_message_to_memory(messages)
                    break


@decision_maker_registry.register("dummy")
class DummyDecisionMaker(BaseDecisionMaker):
    name: str = "dummy"

    async def astep(
        self,
        agents: List[BaseAgent],
        task_description: str,
        previous_plan: str = "No solution yet.",
        advice: str = "No advice yet.",
        *args,
        **kwargs,
    ) -> List[SolverMessage]:
        return [
            SolverMessage(content=task_description, sender=self.name, sender_agent=self)
        ]

spaces/AhmedBadrDev/stomach/app.py
DELETED
@@ -1,36 +0,0 @@
import gradio as gr
import tensorflow as tf
import numpy as np

# Load the model
model = tf.keras.models.load_model('model.h5')

# Define the class names
class_names = {
    0: 'Esophagitis',
    1: 'Dyed lifted polyps'
}


def classify_image(image):
    # Preprocess the image
    img_array = tf.image.resize(image, [256, 256])
    img_array = tf.expand_dims(img_array, 0) / 255.0

    # Make a prediction
    prediction = model.predict(img_array)
    predicted_class = tf.argmax(prediction[0], axis=-1)
    confidence = np.max(prediction[0])

    return class_names[predicted_class.numpy()], confidence


iface = gr.Interface(
    fn=classify_image,
    inputs="image",
    outputs=["text", "number"],
    examples=[
        ['examples/0.jpg'],
        ['examples/1.jpg'],
    ])
iface.launch()

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/paint_by_example/test_paint_by_example.py
DELETED
@@ -1,214 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionConfig

from diffusers import AutoencoderKL, PaintByExamplePipeline, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class PaintByExamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = PaintByExamplePipeline
    params = IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: update the image_prams once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            image_size=32,
            patch_size=4,
        )
        image_encoder = PaintByExampleImageEncoder(config, proj_size=32)
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "image_encoder": image_encoder,
            "safety_checker": None,
            "feature_extractor": feature_extractor,
        }
        return components

    def convert_to_pt(self, image):
        image = np.array(image.convert("RGB"))
        image = image[None].transpose(0, 3, 1, 2)
        image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
        return image

    def get_dummy_inputs(self, device="cpu", seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        example_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "example_image": example_image,
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_paint_by_example_inpaint(self):
        components = self.get_dummy_components()

        # make sure here that pndm scheduler skips prk
        pipe = PaintByExamplePipeline(**components)
        pipe = pipe.to("cpu")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        output = pipe(**inputs)
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4686, 0.5687, 0.4007, 0.5218, 0.5741, 0.4482, 0.4940, 0.4629, 0.4503])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_paint_by_example_image_tensor(self):
        device = "cpu"
        inputs = self.get_dummy_inputs()
        inputs.pop("mask_image")
        image = self.convert_to_pt(inputs.pop("image"))
        mask_image = image.clamp(0, 1) / 2

        # make sure here that pndm scheduler skips prk
        pipe = PaintByExamplePipeline(**self.get_dummy_components())
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(image=image, mask_image=mask_image[:, 0], **inputs)
        out_1 = output.images

        image = image.cpu().permute(0, 2, 3, 1)[0]
        mask_image = mask_image.cpu().permute(0, 2, 3, 1)[0]

        image = Image.fromarray(np.uint8(image)).convert("RGB")
        mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB")

        output = pipe(**self.get_dummy_inputs())
        out_2 = output.images

        assert out_1.shape == (1, 64, 64, 3)
        assert np.abs(out_1.flatten() - out_2.flatten()).max() < 5e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PaintByExamplePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_paint_by_example(self):
        # make sure here that pndm scheduler skips prk
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/paint_by_example/dog_in_bucket.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/paint_by_example/mask.png"
        )
        example_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/paint_by_example/panda.jpg"
        )

        pipe = PaintByExamplePipeline.from_pretrained("Fantasy-Studio/Paint-by-Example")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(321)
        output = pipe(
            image=init_image,
            mask_image=mask_image,
            example_image=example_image,
            generator=generator,
            guidance_scale=5.0,
            num_inference_steps=50,
            output_type="np",
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4834, 0.4811, 0.4874, 0.5122, 0.5081, 0.5144, 0.5291, 0.5290, 0.5374])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
DELETED
@@ -1,598 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import traceback
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    HeunDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    require_torch_2,
    require_torch_gpu,
    run_test_in_subprocess,
    skip_mps,
)

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# Will be run via run_test_in_subprocess
def _test_img2img_compile(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        torch_device = inputs.pop("torch_device")
        seed = inputs.pop("seed")
        inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed)

        pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        pipe.unet.to(memory_format=torch.channels_last)
        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.0593, 0.0607, 0.0851, 0.0582, 0.0636, 0.0721, 0.0751, 0.0981, 0.0781])

        assert np.abs(expected_slice - image_slice).max() < 1e-3
    except Exception:
        error = f"{traceback.format_exc()}"

    results = {"error": error}
    out_queue.put(results, timeout=timeout)
    out_queue.join()


class StableDiffusionImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4555, 0.3216, 0.4049, 0.4620, 0.4618, 0.4126, 0.4122, 0.4629, 0.4579])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img2img_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4593, 0.3408, 0.4232, 0.4749, 0.4476, 0.4115, 0.4357, 0.4733, 0.4663])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img2img_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2
        inputs["image"] = inputs["image"].repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.4241, 0.5576, 0.5711, 0.4792, 0.4311, 0.5952, 0.5827, 0.5138, 0.5109])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img2img_k_lms(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = LMSDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4398, 0.4949, 0.4337, 0.6580, 0.5555, 0.4338, 0.5769, 0.5955, 0.5175])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass(expected_max_diff=5e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/sketch-mountains-input.png"
        )
        inputs = {
            "prompt": "a fantasy landscape, concept art, high resolution",
            "image": init_image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs

    def test_stable_diffusion_img2img_default(self):
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4300, 0.4662, 0.4930, 0.3990, 0.4307, 0.4525, 0.3719, 0.4064, 0.3923])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_img2img_k_lms(self):
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.0389, 0.0346, 0.0415, 0.0290, 0.0218, 0.0210, 0.0408, 0.0567, 0.0271])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_img2img_ddim(self):
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.0593, 0.0607, 0.0851, 0.0582, 0.0636, 0.0721, 0.0751, 0.0981, 0.0781])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_img2img_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 96)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.4958, 0.5107, 1.1045, 2.7539, 4.6680, 3.8320, 1.5049, 1.8633, 2.6523])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 96)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.4956, 0.5078, 1.0918, 2.7520, 4.6484, 3.8125, 1.5146, 1.8633, 2.6367])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 2

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pipeline_with_model_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)

        # Normal inference

        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            safety_checker=None,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()

        # With model offloading

        # Reload but don't move to cuda
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            safety_checker=None,
            torch_dtype=torch.float16,
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        _ = pipe(**inputs)
        mem_bytes_offloaded = torch.cuda.max_memory_allocated()

        assert mem_bytes_offloaded < mem_bytes
        for module in pipe.text_encoder, pipe.unet, pipe.vae:
            assert module.device == torch.device("cpu")

    def test_img2img_2nd_order(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 10
        inputs["strength"] = 0.75
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/img2img_heun.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 5e-2

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 11
        inputs["strength"] = 0.75
        image_other = sd_pipe(**inputs).images[0]

        mean_diff = np.abs(image - image_other).mean()

        # images should be very similar
        assert mean_diff < 5e-2

    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9393, 0.9500, 0.9399, 0.9438, 0.9458, 0.9400, 0.9455, 0.9414, 0.9423])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3

    def test_img2img_safety_checker_works(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 20
        # make sure the safety checker is activated
        inputs["prompt"] = "naked, sex, porn"
        out = sd_pipe(**inputs)

        assert out.nsfw_content_detected[0], f"Safety checker should work for prompt: {inputs['prompt']}"
        assert np.abs(out.images[0]).sum() < 1e-5  # should be all zeros

    @require_torch_2
    def test_img2img_compile(self):
        seed = 0
        inputs = self.get_inputs(torch_device, seed=seed)
        # Can't pickle a Generator object
        del inputs["generator"]
        inputs["torch_device"] = torch_device
        inputs["seed"] = seed
        run_test_in_subprocess(test_case=self, target_func=_test_img2img_compile, inputs=inputs)


@nightly
@require_torch_gpu
class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/sketch-mountains-input.png"
        )
        inputs = {
            "prompt": "a fantasy landscape, concept art, high resolution",
            "image": init_image,
            "generator": generator,
            "num_inference_steps": 50,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs

    def test_img2img_pndm(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_pndm.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_ddim(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_lms(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_lms.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_dpm(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 30
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_dpm.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
DELETED
@@ -1,5 +0,0 @@
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))

spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py
DELETED
@@ -1,9 +0,0 @@
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
classes = ('person', )
data = dict(
    train=dict(classes=classes),
    val=dict(classes=classes),
    test=dict(classes=classes))

load_from = 'http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth'  # noqa

spaces/Andy1621/uniformer_image_detection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py
DELETED
@@ -1,56 +0,0 @@
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    pretrained='open-mmlab://regnetx_3.2gf',
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)

spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/util/html.py
DELETED
@@ -1,86 +0,0 @@
1 |
-
import dominate
|
2 |
-
from dominate.tags import meta, h3, table, tr, td, p, a, img, br
|
3 |
-
import os
|
4 |
-
|
5 |
-
|
6 |
-
class HTML:
|
7 |
-
"""This HTML class allows us to save examples and write texts into a single HTML file.
|
8 |
-
|
9 |
-
It consists of functions such as <add_header> (add a text header to the HTML file),
|
10 |
-
<add_images> (add a row of examples to the HTML file), and <save> (save the HTML to the disk).
|
11 |
-
It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
|
12 |
-
"""
|
13 |
-
|
14 |
-
def __init__(self, web_dir, title, refresh=0):
|
15 |
-
"""Initialize the HTML classes
|
16 |
-
|
17 |
-
Parameters:
|
18 |
-
web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; examples will be saved at <web_dir/examples/
|
19 |
-
title (str) -- the webpage name
|
20 |
-
refresh (int) -- how often the website refresh itself; if 0; no refreshing
|
21 |
-
"""
|
22 |
-
self.title = title
|
23 |
-
self.web_dir = web_dir
|
24 |
-
self.img_dir = os.path.join(self.web_dir, 'examples')
|
25 |
-
if not os.path.exists(self.web_dir):
|
26 |
-
os.makedirs(self.web_dir)
|
27 |
-
if not os.path.exists(self.img_dir):
|
28 |
-
os.makedirs(self.img_dir)
|
29 |
-
|
30 |
-
self.doc = dominate.document(title=title)
|
31 |
-
if refresh > 0:
|
32 |
-
with self.doc.head:
|
33 |
-
meta(http_equiv="refresh", content=str(refresh))
|
34 |
-
|
35 |
-
def get_image_dir(self):
|
36 |
-
"""Return the directory that stores examples"""
|
37 |
-
return self.img_dir
|
38 |
-
|
39 |
-
def add_header(self, text):
|
40 |
-
"""Insert a header to the HTML file
|
41 |
-
|
42 |
-
Parameters:
|
43 |
-
text (str) -- the header text
|
44 |
-
"""
|
45 |
-
with self.doc:
|
46 |
-
h3(text)
|
47 |
-
|
48 |
-
def add_images(self, ims, txts, links, width=400):
|
49 |
-
"""add examples to the HTML file
|
50 |
-
|
51 |
-
Parameters:
|
52 |
-
ims (str list) -- a list of image paths
|
53 |
-
txts (str list) -- a list of image names shown on the website
|
54 |
-
links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
|
55 |
-
"""
|
56 |
-
self.t = table(border=1, style="table-layout: fixed;") # Insert a table
|
57 |
-
self.doc.add(self.t)
|
58 |
-
with self.t:
|
59 |
-
with tr():
|
60 |
-
for im, txt, link in zip(ims, txts, links):
|
61 |
-
with td(style="word-wrap: break-word;", halign="center", valign="top"):
|
62 |
-
with p():
|
63 |
-
with a(href=os.path.join('examples', link)):
|
64 |
-
img(style="width:%dpx" % width, src=os.path.join('examples', im))
|
65 |
-
br()
|
66 |
-
p(txt)
|
67 |
-
|
68 |
-
def save(self):
|
69 |
-
"""save the current content to the HMTL file"""
|
70 |
-
html_file = '%s/index.html' % self.web_dir
|
71 |
-
f = open(html_file, 'wt')
|
72 |
-
f.write(self.doc.render())
|
73 |
-
f.close()
|
74 |
-
|
75 |
-
|
76 |
-
if __name__ == '__main__': # we show an example usage here.
|
77 |
-
html = HTML('web/', 'test_html')
|
78 |
-
html.add_header('hello world')
|
79 |
-
|
80 |
-
ims, txts, links = [], [], []
|
81 |
-
for n in range(4):
|
82 |
-
ims.append('image_%d.png' % n)
|
83 |
-
txts.append('text_%d' % n)
|
84 |
-
links.append('image_%d.png' % n)
|
85 |
-
html.add_images(ims, txts, links)
|
86 |
-
html.save()
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_module.py
DELETED
@@ -1,206 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch.nn as nn

from annotator.uniformer.mmcv.utils import _BatchNorm, _InstanceNorm
from ..utils import constant_init, kaiming_init
from .activation import build_activation_layer
from .conv import build_conv_layer
from .norm import build_norm_layer
from .padding import build_padding_layer
from .registry import PLUGIN_LAYERS


@PLUGIN_LAYERS.register_module()
class ConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers.

    This block simplifies the usage of convolution layers, which are commonly
    used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
    It is based upon three build methods: `build_conv_layer()`,
    `build_norm_layer()` and `build_activation_layer()`.

    Besides, we add some additional features in this module.
    1. Automatically set `bias` of the conv layer.
    2. Spectral norm is supported.
    3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only
       supports zero and circular padding, and we add "reflect" padding mode.

    Args:
        in_channels (int): Number of channels in the input feature map.
            Same as that in ``nn._ConvNd``.
        out_channels (int): Number of channels produced by the convolution.
            Same as that in ``nn._ConvNd``.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
            Same as that in ``nn._ConvNd``.
        stride (int | tuple[int]): Stride of the convolution.
            Same as that in ``nn._ConvNd``.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Same as that in ``nn._ConvNd``.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Same as that in ``nn._ConvNd``.
        groups (int): Number of blocked connections from input channels to
            output channels. Same as that in ``nn._ConvNd``.
        bias (bool | str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise
            False. Default: "auto".
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        inplace (bool): Whether to use inplace mode for activation.
            Default: True.
        with_spectral_norm (bool): Whether use spectral norm in conv module.
            Default: False.
        padding_mode (str): If the `padding_mode` has not been supported by
            current `Conv2d` in PyTorch, we will use our own padding layer
            instead. Currently, we support ['zeros', 'circular'] with official
            implementation and ['reflect'] with our own implementation.
            Default: 'zeros'.
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Common examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
            Default: ('conv', 'norm', 'act').
    """

    _abbr_ = 'conv_block'

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias='auto',
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 inplace=True,
                 with_spectral_norm=False,
                 padding_mode='zeros',
                 order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert conv_cfg is None or isinstance(conv_cfg, dict)
        assert norm_cfg is None or isinstance(norm_cfg, dict)
        assert act_cfg is None or isinstance(act_cfg, dict)
        official_padding_mode = ['zeros', 'circular']
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.inplace = inplace
        self.with_spectral_norm = with_spectral_norm
        self.with_explicit_padding = padding_mode not in official_padding_mode
        self.order = order
        assert isinstance(self.order, tuple) and len(self.order) == 3
        assert set(order) == set(['conv', 'norm', 'act'])

        self.with_norm = norm_cfg is not None
        self.with_activation = act_cfg is not None
        # if the conv layer is before a norm layer, bias is unnecessary.
        if bias == 'auto':
            bias = not self.with_norm
        self.with_bias = bias

        if self.with_explicit_padding:
            pad_cfg = dict(type=padding_mode)
            self.padding_layer = build_padding_layer(pad_cfg, padding)

        # reset padding to 0 for conv module
        conv_padding = 0 if self.with_explicit_padding else padding
        # build convolution layer
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=conv_padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # export the attributes of self.conv to a higher level for convenience
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups

        if self.with_spectral_norm:
            self.conv = nn.utils.spectral_norm(self.conv)

        # build normalization layers
        if self.with_norm:
            # norm layer is after conv layer
            if order.index('norm') > order.index('conv'):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
            if self.with_bias:
                if isinstance(norm, (_BatchNorm, _InstanceNorm)):
                    warnings.warn(
                        'Unnecessary conv bias before batch/instance norm')
        else:
            self.norm_name = None

        # build activation layer
        if self.with_activation:
            act_cfg_ = act_cfg.copy()
            # nn.Tanh has no 'inplace' argument
            if act_cfg_['type'] not in [
                    'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish'
            ]:
                act_cfg_.setdefault('inplace', inplace)
            self.activate = build_activation_layer(act_cfg_)

        # Use msra init by default
        self.init_weights()

    @property
    def norm(self):
        if self.norm_name:
            return getattr(self, self.norm_name)
        else:
            return None

    def init_weights(self):
        # 1. It is mainly for customized conv layers with their own
        #    initialization manners by calling their own ``init_weights()``,
        #    and we do not want ConvModule to override the initialization.
        # 2. For customized conv layers without their own initialization
        #    manners (that is, they don't have their own ``init_weights()``)
        #    and PyTorch's conv layers, they will be initialized by
        #    this method with default ``kaiming_init``.
        # Note: For PyTorch's conv layers, they will be overwritten by our
        #    initialization implementation using default ``kaiming_init``.
        if not hasattr(self.conv, 'init_weights'):
            if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
                nonlinearity = 'leaky_relu'
                a = self.act_cfg.get('negative_slope', 0.01)
            else:
                nonlinearity = 'relu'
                a = 0
            kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)

    def forward(self, x, activate=True, norm=True):
        for layer in self.order:
            if layer == 'conv':
                if self.with_explicit_padding:
                    x = self.padding_layer(x)
                x = self.conv(x)
            elif layer == 'norm' and norm and self.with_norm:
                x = self.norm(x)
            elif layer == 'act' and activate and self.with_activation:
                x = self.activate(x)
        return x
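A minimal usage sketch (not part of this diff), assuming the class above is importable under the vendored path used in this space; with norm_cfg given, bias='auto' resolves to False as the docstring describes:

import torch
from annotator.uniformer.mmcv.cnn.bricks.conv_module import ConvModule

# Bundled conv -> BN -> ReLU block; conv bias is disabled automatically.
block = ConvModule(3, 16, kernel_size=3, padding=1, norm_cfg=dict(type='BN'))
y = block(torch.randn(1, 3, 32, 32))  # output shape (1, 16, 32, 32)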
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/checkpoint.py
DELETED
@@ -1,707 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
import io
import os
import os.path as osp
import pkgutil
import re
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory

import torch
import torchvision
from torch.optim import Optimizer
from torch.utils import model_zoo

import annotator.uniformer.mmcv as mmcv
from ..fileio import FileClient
from ..fileio import load as load_file
from ..parallel import is_module_wrapper
from ..utils import mkdir_or_exist
from .dist_utils import get_dist_info

ENV_MMCV_HOME = 'MMCV_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'


def _get_mmcv_home():
    mmcv_home = os.path.expanduser(
        os.getenv(
            ENV_MMCV_HOME,
            os.path.join(
                os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))

    mkdir_or_exist(mmcv_home)
    return mmcv_home


def load_state_dict(module, state_dict, strict=False, logger=None):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False`` and the message for
    param mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        logger (:obj:`logging.Logger`, optional): Logger to log the error
            message. If not specified, print function will be used.
    """
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []

    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    # use _load_from_state_dict to enable checkpoint version control
    def load(module, prefix=''):
        # recursively check parallel module in case that the model has a
        # complicated structure, e.g., nn.Module(nn.Module(DDP))
        if is_module_wrapper(module):
            module = module.module
        local_metadata = {} if metadata is None else metadata.get(
            prefix[:-1], {})
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     all_missing_keys, unexpected_keys,
                                     err_msg)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    load(module)
    load = None  # break load->load reference cycle

    # ignore "num_batches_tracked" of BN layers
    missing_keys = [
        key for key in all_missing_keys if 'num_batches_tracked' not in key
    ]

    if unexpected_keys:
        err_msg.append('unexpected key in source '
                       f'state_dict: {", ".join(unexpected_keys)}\n')
    if missing_keys:
        err_msg.append(
            f'missing keys in source state_dict: {", ".join(missing_keys)}\n')

    rank, _ = get_dist_info()
    if len(err_msg) > 0 and rank == 0:
        err_msg.insert(
            0, 'The model and loaded state dict do not match exactly\n')
        err_msg = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(err_msg)
        elif logger is not None:
            logger.warning(err_msg)
        else:
            print(err_msg)


def get_torchvision_models():
    model_urls = dict()
    for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
        if ispkg:
            continue
        _zoo = import_module(f'torchvision.models.{name}')
        if hasattr(_zoo, 'model_urls'):
            _urls = getattr(_zoo, 'model_urls')
            model_urls.update(_urls)
    return model_urls


def get_external_models():
    mmcv_home = _get_mmcv_home()
    default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
    default_urls = load_file(default_json_path)
    assert isinstance(default_urls, dict)
    external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
    if osp.exists(external_json_path):
        external_urls = load_file(external_json_path)
        assert isinstance(external_urls, dict)
        default_urls.update(external_urls)

    return default_urls


def get_mmcls_models():
    mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
    mmcls_urls = load_file(mmcls_json_path)

    return mmcls_urls


def get_deprecated_model_names():
    deprecate_json_path = osp.join(mmcv.__path__[0],
                                   'model_zoo/deprecated.json')
    deprecate_urls = load_file(deprecate_json_path)
    assert isinstance(deprecate_urls, dict)

    return deprecate_urls


def _process_mmcls_checkpoint(checkpoint):
    state_dict = checkpoint['state_dict']
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if k.startswith('backbone.'):
            new_state_dict[k[9:]] = v
    new_checkpoint = dict(state_dict=new_state_dict)

    return new_checkpoint


class CheckpointLoader:
    """A general checkpoint loader to manage all schemes."""

    _schemes = {}

    @classmethod
    def _register_scheme(cls, prefixes, loader, force=False):
        if isinstance(prefixes, str):
            prefixes = [prefixes]
        else:
            assert isinstance(prefixes, (list, tuple))
        for prefix in prefixes:
            if (prefix not in cls._schemes) or force:
                cls._schemes[prefix] = loader
            else:
                raise KeyError(
                    f'{prefix} is already registered as a loader backend, '
                    'add "force=True" if you want to override it')
        # sort, longer prefixes take priority
        cls._schemes = OrderedDict(
            sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))

    @classmethod
    def register_scheme(cls, prefixes, loader=None, force=False):
        """Register a loader to CheckpointLoader.

        This method can be used as a normal class method or a decorator.

        Args:
            prefixes (str or list[str] or tuple[str]):
            The prefix of the registered loader.
            loader (function, optional): The loader function to be registered.
                When this method is used as a decorator, loader is None.
                Defaults to None.
            force (bool, optional): Whether to override the loader
                if the prefix has already been registered. Defaults to False.
        """

        if loader is not None:
            cls._register_scheme(prefixes, loader, force=force)
            return

        def _register(loader_cls):
            cls._register_scheme(prefixes, loader_cls, force=force)
            return loader_cls

        return _register

    @classmethod
    def _get_checkpoint_loader(cls, path):
        """Finds a loader that supports the given path. Falls back to the local
        loader if no other loader is found.

        Args:
            path (str): checkpoint path

        Returns:
            loader (function): checkpoint loader
        """

        for p in cls._schemes:
            if path.startswith(p):
                return cls._schemes[p]

    @classmethod
    def load_checkpoint(cls, filename, map_location=None, logger=None):
        """load checkpoint through URL scheme path.

        Args:
            filename (str): checkpoint file name with given prefix
            map_location (str, optional): Same as :func:`torch.load`.
                Default: None
            logger (:mod:`logging.Logger`, optional): The logger for message.
                Default: None

        Returns:
            dict or OrderedDict: The loaded checkpoint.
        """

        checkpoint_loader = cls._get_checkpoint_loader(filename)
        class_name = checkpoint_loader.__name__
        mmcv.print_log(
            f'load checkpoint from {class_name[10:]} path: {filename}', logger)
        return checkpoint_loader(filename, map_location)


@CheckpointLoader.register_scheme(prefixes='')
def load_from_local(filename, map_location):
    """load checkpoint by local file path.

    Args:
        filename (str): local checkpoint file path
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    if not osp.isfile(filename):
        raise IOError(f'{filename} is not a checkpoint file')
    checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint


@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
def load_from_http(filename, map_location=None, model_dir=None):
    """load checkpoint through HTTP or HTTPS scheme path. In distributed
    setting, this function only download checkpoint at local rank 0.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.
        model_dir (string, optional): directory in which to save the object,
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    rank, world_size = get_dist_info()
    rank = int(os.environ.get('LOCAL_RANK', rank))
    if rank == 0:
        checkpoint = model_zoo.load_url(
            filename, model_dir=model_dir, map_location=map_location)
    if world_size > 1:
        torch.distributed.barrier()
        if rank > 0:
            checkpoint = model_zoo.load_url(
                filename, model_dir=model_dir, map_location=map_location)
    return checkpoint


@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
    """load checkpoint through the file path prefixed with pavi. In distributed
    setting, this function download ckpt at all ranks to different temporary
    directories.

    Args:
        filename (str): checkpoint file path with pavi prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    assert filename.startswith('pavi://'), \
        f'Expected filename startswith `pavi://`, but get {filename}'
    model_path = filename[7:]

    try:
        from pavi import modelcloud
    except ImportError:
        raise ImportError(
            'Please install pavi to load checkpoint from modelcloud.')

    model = modelcloud.get(model_path)
    with TemporaryDirectory() as tmp_dir:
        downloaded_file = osp.join(tmp_dir, model.name)
        model.download(downloaded_file)
        checkpoint = torch.load(downloaded_file, map_location=map_location)
    return checkpoint


@CheckpointLoader.register_scheme(prefixes='s3://')
def load_from_ceph(filename, map_location=None, backend='petrel'):
    """load checkpoint through the file path prefixed with s3. In distributed
    setting, this function download ckpt at all ranks to different temporary
    directories.

    Args:
        filename (str): checkpoint file path with s3 prefix
        map_location (str, optional): Same as :func:`torch.load`.
        backend (str, optional): The storage backend type. Options are 'ceph',
            'petrel'. Default: 'petrel'.

    .. warning::
        :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
        please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    allowed_backends = ['ceph', 'petrel']
    if backend not in allowed_backends:
        raise ValueError(f'Load from Backend {backend} is not supported.')

    if backend == 'ceph':
        warnings.warn(
            'CephBackend will be deprecated, please use PetrelBackend instead')

    # CephClient and PetrelBackend have the same prefix 's3://' and the latter
    # will be chosen as default. If PetrelBackend can not be instantiated
    # successfully, the CephClient will be chosen.
    try:
        file_client = FileClient(backend=backend)
    except ImportError:
        allowed_backends.remove(backend)
        file_client = FileClient(backend=allowed_backends[0])

    with io.BytesIO(file_client.get(filename)) as buffer:
        checkpoint = torch.load(buffer, map_location=map_location)
    return checkpoint


@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
def load_from_torchvision(filename, map_location=None):
    """load checkpoint through the file path prefixed with modelzoo or
    torchvision.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    model_urls = get_torchvision_models()
    if filename.startswith('modelzoo://'):
        warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
                      'use "torchvision://" instead')
        model_name = filename[11:]
    else:
        model_name = filename[14:]
    return load_from_http(model_urls[model_name], map_location=map_location)


@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
def load_from_openmmlab(filename, map_location=None):
    """load checkpoint through the file path prefixed with open-mmlab or
    openmmlab.

    Args:
        filename (str): checkpoint file path with open-mmlab or
            openmmlab prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    model_urls = get_external_models()
    prefix_str = 'open-mmlab://'
    if filename.startswith(prefix_str):
        model_name = filename[13:]
    else:
        model_name = filename[12:]
        prefix_str = 'openmmlab://'

    deprecated_urls = get_deprecated_model_names()
    if model_name in deprecated_urls:
        warnings.warn(f'{prefix_str}{model_name} is deprecated in favor '
                      f'of {prefix_str}{deprecated_urls[model_name]}')
        model_name = deprecated_urls[model_name]
    model_url = model_urls[model_name]
    # check if is url
    if model_url.startswith(('http://', 'https://')):
        checkpoint = load_from_http(model_url, map_location=map_location)
    else:
        filename = osp.join(_get_mmcv_home(), model_url)
        if not osp.isfile(filename):
            raise IOError(f'{filename} is not a checkpoint file')
        checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint


@CheckpointLoader.register_scheme(prefixes='mmcls://')
def load_from_mmcls(filename, map_location=None):
    """load checkpoint through the file path prefixed with mmcls.

    Args:
        filename (str): checkpoint file path with mmcls prefix
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    model_urls = get_mmcls_models()
    model_name = filename[8:]
    checkpoint = load_from_http(
        model_urls[model_name], map_location=map_location)
    checkpoint = _process_mmcls_checkpoint(checkpoint)
    return checkpoint


def _load_checkpoint(filename, map_location=None, logger=None):
    """Load checkpoint from somewhere (modelzoo, file, url).

    Args:
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None.
        logger (:mod:`logging.Logger`, optional): The logger for error message.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint. It can be either an
            OrderedDict storing model weights or a dict containing other
            information, which depends on the checkpoint.
    """
    return CheckpointLoader.load_checkpoint(filename, map_location, logger)


def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
    """Load partial pretrained model with specific prefix.

    Args:
        prefix (str): The prefix of sub-module.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str | None): Same as :func:`torch.load`. Default: None.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    checkpoint = _load_checkpoint(filename, map_location=map_location)

    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    if not prefix.endswith('.'):
        prefix += '.'
    prefix_len = len(prefix)

    state_dict = {
        k[prefix_len:]: v
        for k, v in state_dict.items() if k.startswith(prefix)
    }

    assert state_dict, f'{prefix} is not in the pretrained model'
    return state_dict


def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None,
                    revise_keys=[(r'^module\.', '')]):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.
        revise_keys (list): A list of customized keywords to modify the
            state_dict in checkpoint. Each item is a (pattern, replacement)
            pair of the regular expression operations. Default: strip
            the prefix 'module.' by [(r'^module\\.', '')].

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    checkpoint = _load_checkpoint(filename, map_location, logger)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        raise RuntimeError(
            f'No state_dict found in checkpoint file {filename}')
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint

    # strip prefix of state_dict
    metadata = getattr(state_dict, '_metadata', OrderedDict())
    for p, r in revise_keys:
        state_dict = OrderedDict(
            {re.sub(p, r, k): v
             for k, v in state_dict.items()})
    # Keep metadata in state_dict
    state_dict._metadata = metadata

    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint


def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on GPU.
    """
    state_dict_cpu = OrderedDict()
    for key, val in state_dict.items():
        state_dict_cpu[key] = val.cpu()
    # Keep metadata in state_dict
    state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
    return state_dict_cpu


def _save_to_state_dict(module, destination, prefix, keep_vars):
    """Saves module state to `destination` dictionary.

    This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (dict): A dict where state will be stored.
        prefix (str): The prefix for parameters and buffers used in this
            module.
    """
    for name, param in module._parameters.items():
        if param is not None:
            destination[prefix + name] = param if keep_vars else param.detach()
    for name, buf in module._buffers.items():
        # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
        if buf is not None:
            destination[prefix + name] = buf if keep_vars else buf.detach()


def get_state_dict(module, destination=None, prefix='', keep_vars=False):
    """Returns a dictionary containing a whole state of the module.

    Both parameters and persistent buffers (e.g. running averages) are
    included. Keys are corresponding parameter and buffer names.

    This method is modified from :meth:`torch.nn.Module.state_dict` to
    recursively check parallel module in case that the model has a complicated
    structure, e.g., nn.Module(nn.Module(DDP)).

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (OrderedDict): Returned dict for the state of the
            module.
        prefix (str): Prefix of the key.
        keep_vars (bool): Whether to keep the variable property of the
            parameters. Default: False.

    Returns:
        dict: A dictionary containing a whole state of the module.
    """
    # recursively check parallel module in case that the model has a
    # complicated structure, e.g., nn.Module(nn.Module(DDP))
    if is_module_wrapper(module):
        module = module.module

    # below is the same as torch.nn.Module.state_dict()
    if destination is None:
        destination = OrderedDict()
        destination._metadata = OrderedDict()
    destination._metadata[prefix[:-1]] = local_metadata = dict(
        version=module._version)
    _save_to_state_dict(module, destination, prefix, keep_vars)
    for name, child in module._modules.items():
        if child is not None:
            get_state_dict(
                child, destination, prefix + name + '.', keep_vars=keep_vars)
    for hook in module._state_dict_hooks.values():
        hook_result = hook(module, destination, prefix, local_metadata)
        if hook_result is not None:
            destination = hook_result
    return destination


def save_checkpoint(model,
                    filename,
                    optimizer=None,
                    meta=None,
                    file_client_args=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())

    if is_module_wrapper(model):
        model = model.module

    if hasattr(model, 'CLASSES') and model.CLASSES is not None:
        # save class name to the meta
        meta.update(CLASSES=model.CLASSES)

    checkpoint = {
        'meta': meta,
        'state_dict': weights_to_cpu(get_state_dict(model))
    }
    # save optimizer state dict in the checkpoint
    if isinstance(optimizer, Optimizer):
        checkpoint['optimizer'] = optimizer.state_dict()
    elif isinstance(optimizer, dict):
        checkpoint['optimizer'] = {}
        for name, optim in optimizer.items():
            checkpoint['optimizer'][name] = optim.state_dict()

    if filename.startswith('pavi://'):
        if file_client_args is not None:
            raise ValueError(
                'file_client_args should be "None" if filename starts with'
                f'"pavi://", but got {file_client_args}')
        try:
            from pavi import modelcloud
            from pavi import exception
        except ImportError:
            raise ImportError(
                'Please install pavi to load checkpoint from modelcloud.')
        model_path = filename[7:]
        root = modelcloud.Folder()
        model_dir, model_name = osp.split(model_path)
        try:
            model = modelcloud.get(model_dir)
        except exception.NodeNotFoundError:
            model = root.create_training_model(model_dir)
        with TemporaryDirectory() as tmp_dir:
            checkpoint_file = osp.join(tmp_dir, model_name)
            with open(checkpoint_file, 'wb') as f:
                torch.save(checkpoint, f)
                f.flush()
            model.create_file(checkpoint_file, name=model_name)
    else:
        file_client = FileClient.infer_client(file_client_args, filename)
        with io.BytesIO() as f:
            torch.save(checkpoint, f)
            file_client.put(f.getvalue(), filename)
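A small sketch (not part of this diff) of the decorator form of CheckpointLoader.register_scheme documented above; the 'myfs://' prefix and the loader body are invented here purely for illustration:

import torch
from annotator.uniformer.mmcv.runner.checkpoint import CheckpointLoader

@CheckpointLoader.register_scheme(prefixes='myfs://')
def load_from_myfs(filename, map_location=None):
    # Strip the custom prefix and fall back to a plain local torch.load.
    return torch.load(filename[len('myfs://'):], map_location=map_location)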
spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/F0Predictor.py
DELETED
@@ -1,16 +0,0 @@
class F0Predictor(object):
    def compute_f0(self, wav, p_len):
        """
        input: wav:[signal_length]
               p_len:int
        output: f0:[signal_length//hop_length]
        """
        pass

    def compute_f0_uv(self, wav, p_len):
        """
        input: wav:[signal_length]
               p_len:int
        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
        """
        pass
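A hypothetical concrete subclass (not part of this diff) showing the shape contract documented above; the constant pitch value is a placeholder rather than a real F0 tracker:

import numpy as np

class ConstantF0Predictor(F0Predictor):
    def compute_f0(self, wav, p_len):
        # f0:[signal_length//hop_length]
        return np.full(p_len, 100.0)

    def compute_f0_uv(self, wav, p_len):
        f0 = np.full(p_len, 100.0)
        uv = np.ones(p_len)  # voiced/unvoiced flags
        return f0, uv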
spaces/ArtGAN/Diffusion-API/diffusion_webui/diffusion_models/inpaint_app.py
DELETED
@@ -1,149 +0,0 @@
import gradio as gr
import torch
from diffusers import DiffusionPipeline

from diffusion_webui.utils.model_list import stable_inpiant_model_list


class StableDiffusionInpaintGenerator:
    def __init__(self):
        self.pipe = None

    def load_model(self, stable_model_path):
        if self.pipe is None or self.pipe.model_name != stable_model_path:
            self.pipe = DiffusionPipeline.from_pretrained(
                stable_model_path, revision="fp16", torch_dtype=torch.float16
            )
            self.pipe.to("cuda")
            self.pipe.enable_xformers_memory_efficient_attention()
            self.pipe.model_name = stable_model_path

        return self.pipe

    def generate_image(
        self,
        pil_image: str,
        stable_model_path: str,
        prompt: str,
        negative_prompt: str,
        num_images_per_prompt: int,
        guidance_scale: int,
        num_inference_step: int,
        seed_generator=0,
    ):
        image = pil_image["image"].convert("RGB").resize((512, 512))
        mask_image = pil_image["mask"].convert("RGB").resize((512, 512))
        pipe = self.load_model(stable_model_path)

        if seed_generator == 0:
            random_seed = torch.randint(0, 1000000, (1,))
            generator = torch.manual_seed(random_seed)
        else:
            generator = torch.manual_seed(seed_generator)

        output = pipe(
            prompt=prompt,
            image=image,
            mask_image=mask_image,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            num_inference_steps=num_inference_step,
            guidance_scale=guidance_scale,
            generator=generator,
        ).images

        return output

    def app():
        with gr.Blocks():
            with gr.Row():
                with gr.Column():
                    stable_diffusion_inpaint_image_file = gr.Image(
                        source="upload",
                        tool="sketch",
                        elem_id="image_upload",
                        type="pil",
                        label="Upload",
                    ).style(height=260)

                    stable_diffusion_inpaint_prompt = gr.Textbox(
                        lines=1,
                        placeholder="Prompt",
                        show_label=False,
                    )

                    stable_diffusion_inpaint_negative_prompt = gr.Textbox(
                        lines=1,
                        placeholder="Negative Prompt",
                        show_label=False,
                    )
                    stable_diffusion_inpaint_model_id = gr.Dropdown(
                        choices=stable_inpiant_model_list,
                        value=stable_inpiant_model_list[0],
                        label="Inpaint Model Id",
                    )
                    with gr.Row():
                        with gr.Column():
                            stable_diffusion_inpaint_guidance_scale = gr.Slider(
                                minimum=0.1,
                                maximum=15,
                                step=0.1,
                                value=7.5,
                                label="Guidance Scale",
                            )

                            stable_diffusion_inpaint_num_inference_step = (
                                gr.Slider(
                                    minimum=1,
                                    maximum=100,
                                    step=1,
                                    value=50,
                                    label="Num Inference Step",
                                )
                            )

                        with gr.Row():
                            with gr.Column():
                                stable_diffusion_inpiant_num_images_per_prompt = gr.Slider(
                                    minimum=1,
                                    maximum=4,
                                    step=1,
                                    value=1,
                                    label="Number Of Images",
                                )
                                stable_diffusion_inpaint_seed_generator = (
                                    gr.Slider(
                                        minimum=0,
                                        maximum=1000000,
                                        step=1,
                                        value=0,
                                        label="Seed(0 for random)",
                                    )
                                )

                    stable_diffusion_inpaint_predict = gr.Button(
                        value="Generator"
                    )

                with gr.Column():
                    output_image = gr.Gallery(
                        label="Generated images",
                        show_label=False,
                        elem_id="gallery",
                    ).style(grid=(1, 2))

            stable_diffusion_inpaint_predict.click(
                fn=StableDiffusionInpaintGenerator().generate_image,
                inputs=[
                    stable_diffusion_inpaint_image_file,
                    stable_diffusion_inpaint_model_id,
                    stable_diffusion_inpaint_prompt,
                    stable_diffusion_inpaint_negative_prompt,
                    stable_diffusion_inpiant_num_images_per_prompt,
                    stable_diffusion_inpaint_guidance_scale,
                    stable_diffusion_inpaint_num_inference_step,
                    stable_diffusion_inpaint_seed_generator,
                ],
                outputs=[output_image],
            )
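A sketch (not part of this diff) of calling the generator class above outside the Gradio UI; the input images are hypothetical and the first entry of stable_inpiant_model_list is assumed to be a valid inpainting model id:

from PIL import Image

init = Image.open("photo.png")  # hypothetical source image
mask = Image.open("mask.png")   # hypothetical mask marking the region to repaint
images = StableDiffusionInpaintGenerator().generate_image(
    pil_image={"image": init, "mask": mask},
    stable_model_path=stable_inpiant_model_list[0],
    prompt="a wooden bench in a park",
    negative_prompt="blurry",
    num_images_per_prompt=1,
    guidance_scale=7.5,
    num_inference_step=50,
    seed_generator=0,
)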
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/typing_extensions.py
DELETED
@@ -1,2312 +0,0 @@
|
|
1 |
-
import abc
|
2 |
-
import collections
|
3 |
-
import collections.abc
|
4 |
-
import functools
|
5 |
-
import inspect
|
6 |
-
import operator
|
7 |
-
import sys
|
8 |
-
import types as _types
|
9 |
-
import typing
|
10 |
-
import warnings
|
11 |
-
|
12 |
-
|
13 |
-
__all__ = [
|
14 |
-
# Super-special typing primitives.
|
15 |
-
'Any',
|
16 |
-
'ClassVar',
|
17 |
-
'Concatenate',
|
18 |
-
'Final',
|
19 |
-
'LiteralString',
|
20 |
-
'ParamSpec',
|
21 |
-
'ParamSpecArgs',
|
22 |
-
'ParamSpecKwargs',
|
23 |
-
'Self',
|
24 |
-
'Type',
|
25 |
-
'TypeVar',
|
26 |
-
'TypeVarTuple',
|
27 |
-
'Unpack',
|
28 |
-
|
29 |
-
# ABCs (from collections.abc).
|
30 |
-
'Awaitable',
|
31 |
-
'AsyncIterator',
|
32 |
-
'AsyncIterable',
|
33 |
-
'Coroutine',
|
34 |
-
'AsyncGenerator',
|
35 |
-
'AsyncContextManager',
|
36 |
-
'ChainMap',
|
37 |
-
|
38 |
-
# Concrete collection types.
|
39 |
-
'ContextManager',
|
40 |
-
'Counter',
|
41 |
-
'Deque',
|
42 |
-
'DefaultDict',
|
43 |
-
'NamedTuple',
|
44 |
-
'OrderedDict',
|
45 |
-
'TypedDict',
|
46 |
-
|
47 |
-
# Structural checks, a.k.a. protocols.
|
48 |
-
'SupportsIndex',
|
49 |
-
|
50 |
-
# One-off things.
|
51 |
-
'Annotated',
|
52 |
-
'assert_never',
|
53 |
-
'assert_type',
|
54 |
-
'clear_overloads',
|
55 |
-
'dataclass_transform',
|
56 |
-
'deprecated',
|
57 |
-
'get_overloads',
|
58 |
-
'final',
|
59 |
-
'get_args',
|
60 |
-
'get_origin',
|
61 |
-
'get_type_hints',
|
62 |
-
'IntVar',
|
63 |
-
'is_typeddict',
|
64 |
-
'Literal',
|
65 |
-
'NewType',
|
66 |
-
'overload',
|
67 |
-
'override',
|
68 |
-
'Protocol',
|
69 |
-
'reveal_type',
|
70 |
-
'runtime',
|
71 |
-
'runtime_checkable',
|
72 |
-
'Text',
|
73 |
-
'TypeAlias',
|
74 |
-
'TypeGuard',
|
75 |
-
'TYPE_CHECKING',
|
76 |
-
'Never',
|
77 |
-
'NoReturn',
|
78 |
-
'Required',
|
79 |
-
'NotRequired',
|
80 |
-
]
|
81 |
-
|
82 |
-
# for backward compatibility
|
83 |
-
PEP_560 = True
|
84 |
-
GenericMeta = type
|
85 |
-
|
86 |
-
# The functions below are modified copies of typing internal helpers.
|
87 |
-
# They are needed by _ProtocolMeta and they provide support for PEP 646.
|
88 |
-
|
89 |
-
_marker = object()
|
90 |
-
|
91 |
-
|
92 |
-
def _check_generic(cls, parameters, elen=_marker):
|
93 |
-
"""Check correct count for parameters of a generic cls (internal helper).
|
94 |
-
This gives a nice error message in case of count mismatch.
|
95 |
-
"""
|
96 |
-
if not elen:
|
97 |
-
raise TypeError(f"{cls} is not a generic class")
|
98 |
-
if elen is _marker:
|
99 |
-
if not hasattr(cls, "__parameters__") or not cls.__parameters__:
|
100 |
-
raise TypeError(f"{cls} is not a generic class")
|
101 |
-
elen = len(cls.__parameters__)
|
102 |
-
alen = len(parameters)
|
103 |
-
if alen != elen:
|
104 |
-
if hasattr(cls, "__parameters__"):
|
105 |
-
parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
|
106 |
-
num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
|
107 |
-
if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
|
108 |
-
return
|
109 |
-
raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
|
110 |
-
f" actual {alen}, expected {elen}")
|
111 |
-
|
112 |
-
|
113 |
-
if sys.version_info >= (3, 10):
|
114 |
-
def _should_collect_from_parameters(t):
|
115 |
-
return isinstance(
|
116 |
-
t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
|
117 |
-
)
|
118 |
-
elif sys.version_info >= (3, 9):
|
119 |
-
def _should_collect_from_parameters(t):
|
120 |
-
return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
|
121 |
-
else:
|
122 |
-
def _should_collect_from_parameters(t):
|
123 |
-
return isinstance(t, typing._GenericAlias) and not t._special
|
124 |
-
|
125 |
-
|
126 |
-
def _collect_type_vars(types, typevar_types=None):
|
127 |
-
"""Collect all type variable contained in types in order of
|
128 |
-
first appearance (lexicographic order). For example::
|
129 |
-
|
130 |
-
_collect_type_vars((T, List[S, T])) == (T, S)
|
131 |
-
"""
|
132 |
-
if typevar_types is None:
|
133 |
-
typevar_types = typing.TypeVar
|
134 |
-
tvars = []
|
135 |
-
for t in types:
|
136 |
-
if (
|
137 |
-
isinstance(t, typevar_types) and
|
138 |
-
t not in tvars and
|
139 |
-
not _is_unpack(t)
|
140 |
-
):
|
141 |
-
tvars.append(t)
|
142 |
-
if _should_collect_from_parameters(t):
|
143 |
-
tvars.extend([t for t in t.__parameters__ if t not in tvars])
|
144 |
-
return tuple(tvars)
|
145 |
-
|
146 |
-
|
147 |
-
NoReturn = typing.NoReturn
|
148 |
-
|
149 |
-
# Some unconstrained type variables. These are used by the container types.
|
150 |
-
# (These are not for export.)
|
151 |
-
T = typing.TypeVar('T') # Any type.
|
152 |
-
KT = typing.TypeVar('KT') # Key type.
|
153 |
-
VT = typing.TypeVar('VT') # Value type.
|
154 |
-
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
|
155 |
-
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
|
156 |
-
|
157 |
-
|
158 |
-
if sys.version_info >= (3, 11):
|
159 |
-
from typing import Any
|
160 |
-
else:
|
161 |
-
|
162 |
-
class _AnyMeta(type):
|
163 |
-
def __instancecheck__(self, obj):
|
164 |
-
if self is Any:
|
165 |
-
raise TypeError("typing_extensions.Any cannot be used with isinstance()")
|
166 |
-
return super().__instancecheck__(obj)
|
167 |
-
|
168 |
-
def __repr__(self):
|
169 |
-
if self is Any:
|
170 |
-
return "typing_extensions.Any"
|
171 |
-
return super().__repr__()
|
172 |
-
|
173 |
-
class Any(metaclass=_AnyMeta):
|
174 |
-
"""Special type indicating an unconstrained type.
|
175 |
-
- Any is compatible with every type.
|
176 |
-
- Any assumed to have all methods.
|
177 |
-
- All values assumed to be instances of Any.
|
178 |
-
Note that all the above statements are true from the point of view of
|
179 |
-
static type checkers. At runtime, Any should not be used with instance
|
180 |
-
checks.
|
181 |
-
"""
|
182 |
-
def __new__(cls, *args, **kwargs):
|
183 |
-
if cls is Any:
|
184 |
-
raise TypeError("Any cannot be instantiated")
|
185 |
-
return super().__new__(cls, *args, **kwargs)
|
186 |
-
|
187 |
-
|
188 |
-
ClassVar = typing.ClassVar
|
189 |
-
|
190 |
-
# On older versions of typing there is an internal class named "Final".
|
191 |
-
# 3.8+
|
192 |
-
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
|
193 |
-
Final = typing.Final
|
194 |
-
# 3.7
|
195 |
-
else:
|
196 |
-
class _FinalForm(typing._SpecialForm, _root=True):
|
197 |
-
|
198 |
-
def __repr__(self):
|
199 |
-
return 'typing_extensions.' + self._name
|
200 |
-
|
201 |
-
def __getitem__(self, parameters):
|
202 |
-
item = typing._type_check(parameters,
|
203 |
-
f'{self._name} accepts only a single type.')
|
204 |
-
return typing._GenericAlias(self, (item,))
|
205 |
-
|
206 |
-
Final = _FinalForm('Final',
|
207 |
-
doc="""A special typing construct to indicate that a name
|
208 |
-
cannot be re-assigned or overridden in a subclass.
|
209 |
-
For example:
|
210 |
-
|
211 |
-
MAX_SIZE: Final = 9000
|
212 |
-
MAX_SIZE += 1 # Error reported by type checker
|
213 |
-
|
214 |
-
class Connection:
|
215 |
-
TIMEOUT: Final[int] = 10
|
216 |
-
class FastConnector(Connection):
|
217 |
-
TIMEOUT = 1 # Error reported by type checker
|
218 |
-
|
219 |
-
There is no runtime checking of these properties.""")
|
220 |
-
|
221 |
-
if sys.version_info >= (3, 11):
|
222 |
-
final = typing.final
|
223 |
-
else:
|
224 |
-
# @final exists in 3.8+, but we backport it for all versions
|
225 |
-
# before 3.11 to keep support for the __final__ attribute.
|
226 |
-
# See https://bugs.python.org/issue46342
|
227 |
-
def final(f):
|
228 |
-
"""This decorator can be used to indicate to type checkers that
|
229 |
-
the decorated method cannot be overridden, and decorated class
|
230 |
-
cannot be subclassed. For example:
|
231 |
-
|
232 |
-
class Base:
|
233 |
-
@final
|
234 |
-
def done(self) -> None:
|
235 |
-
...
|
236 |
-
class Sub(Base):
|
237 |
-
def done(self) -> None: # Error reported by type checker
|
238 |
-
...
|
239 |
-
@final
|
240 |
-
class Leaf:
|
241 |
-
...
|
242 |
-
class Other(Leaf): # Error reported by type checker
|
243 |
-
...
|
244 |
-
|
245 |
-
There is no runtime checking of these properties. The decorator
|
246 |
-
sets the ``__final__`` attribute to ``True`` on the decorated object
|
247 |
-
to allow runtime introspection.
|
248 |
-
"""
|
249 |
-
try:
|
250 |
-
f.__final__ = True
|
251 |
-
except (AttributeError, TypeError):
|
252 |
-
# Skip the attribute silently if it is not writable.
|
253 |
-
# AttributeError happens if the object has __slots__ or a
|
254 |
-
# read-only property, TypeError if it's a builtin class.
|
255 |
-
pass
|
256 |
-
return f
|
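# Minimal usage sketch (illustrative helper, not part of this module's API),
# assuming the `final` defined above: the decorator changes no behaviour but
# records `__final__ = True` on the decorated object for runtime introspection.
def _example_final_usage():
    @final
    class Leaf:
        @final
        def done(self) -> None:
            ...

    # Both the class and the method carry the runtime marker.
    assert Leaf.__final__ is True
    assert Leaf.done.__final__ is True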
257 |
-
|
258 |
-
|
259 |
-
def IntVar(name):
|
260 |
-
return typing.TypeVar(name)
|
261 |
-
|
262 |
-
|
263 |
-
# 3.8+:
|
264 |
-
if hasattr(typing, 'Literal'):
|
265 |
-
Literal = typing.Literal
|
266 |
-
# 3.7:
|
267 |
-
else:
|
268 |
-
class _LiteralForm(typing._SpecialForm, _root=True):
|
269 |
-
|
270 |
-
def __repr__(self):
|
271 |
-
return 'typing_extensions.' + self._name
|
272 |
-
|
273 |
-
def __getitem__(self, parameters):
|
274 |
-
return typing._GenericAlias(self, parameters)
|
275 |
-
|
276 |
-
Literal = _LiteralForm('Literal',
|
277 |
-
doc="""A type that can be used to indicate to type checkers
|
278 |
-
that the corresponding value has a value literally equivalent
|
279 |
-
to the provided parameter. For example:
|
280 |
-
|
281 |
-
var: Literal[4] = 4
|
282 |
-
|
283 |
-
The type checker understands that 'var' is literally equal to
|
284 |
-
the value 4 and no other value.
|
285 |
-
|
286 |
-
Literal[...] cannot be subclassed. There is no runtime
|
287 |
-
checking to verify that the parameter is actually a value
|
288 |
-
instead of a type.""")
|
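# Minimal usage sketch (illustrative helper, not part of this module),
# assuming the `Literal` defined above: the form is only meaningful to static
# type checkers; at runtime subscription just builds an annotation object.
def _example_literal_usage():
    Mode = Literal["r", "w"]

    def open_mode(mode: Mode) -> str:
        # A checker would reject open_mode("x"); nothing is enforced at runtime.
        return mode

    assert open_mode("r") == "r"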
289 |
-
|
290 |
-
|
291 |
-
_overload_dummy = typing._overload_dummy # noqa
|
292 |
-
|
293 |
-
|
294 |
-
if hasattr(typing, "get_overloads"): # 3.11+
|
295 |
-
overload = typing.overload
|
296 |
-
get_overloads = typing.get_overloads
|
297 |
-
clear_overloads = typing.clear_overloads
|
298 |
-
else:
|
299 |
-
# {module: {qualname: {firstlineno: func}}}
|
300 |
-
_overload_registry = collections.defaultdict(
|
301 |
-
functools.partial(collections.defaultdict, dict)
|
302 |
-
)
|
303 |
-
|
304 |
-
def overload(func):
|
305 |
-
"""Decorator for overloaded functions/methods.
|
306 |
-
|
307 |
-
In a stub file, place two or more stub definitions for the same
|
308 |
-
function in a row, each decorated with @overload. For example:
|
309 |
-
|
310 |
-
@overload
|
311 |
-
def utf8(value: None) -> None: ...
|
312 |
-
@overload
|
313 |
-
def utf8(value: bytes) -> bytes: ...
|
314 |
-
@overload
|
315 |
-
def utf8(value: str) -> bytes: ...
|
316 |
-
|
317 |
-
In a non-stub file (i.e. a regular .py file), do the same but
|
318 |
-
follow it with an implementation. The implementation should *not*
|
319 |
-
be decorated with @overload. For example:
|
320 |
-
|
321 |
-
@overload
|
322 |
-
def utf8(value: None) -> None: ...
|
323 |
-
@overload
|
324 |
-
def utf8(value: bytes) -> bytes: ...
|
325 |
-
@overload
|
326 |
-
def utf8(value: str) -> bytes: ...
|
327 |
-
def utf8(value):
|
328 |
-
# implementation goes here
|
329 |
-
|
330 |
-
The overloads for a function can be retrieved at runtime using the
|
331 |
-
get_overloads() function.
|
332 |
-
"""
|
333 |
-
# classmethod and staticmethod
|
334 |
-
f = getattr(func, "__func__", func)
|
335 |
-
try:
|
336 |
-
_overload_registry[f.__module__][f.__qualname__][
|
337 |
-
f.__code__.co_firstlineno
|
338 |
-
] = func
|
339 |
-
except AttributeError:
|
340 |
-
# Not a normal function; ignore.
|
341 |
-
pass
|
342 |
-
return _overload_dummy
|
343 |
-
|
344 |
-
def get_overloads(func):
|
345 |
-
"""Return all defined overloads for *func* as a sequence."""
|
346 |
-
# classmethod and staticmethod
|
347 |
-
f = getattr(func, "__func__", func)
|
348 |
-
if f.__module__ not in _overload_registry:
|
349 |
-
return []
|
350 |
-
mod_dict = _overload_registry[f.__module__]
|
351 |
-
if f.__qualname__ not in mod_dict:
|
352 |
-
return []
|
353 |
-
return list(mod_dict[f.__qualname__].values())
|
354 |
-
|
355 |
-
def clear_overloads():
|
356 |
-
"""Clear all overloads in the registry."""
|
357 |
-
_overload_registry.clear()
|
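# Minimal usage sketch (illustrative helper, not part of this module),
# assuming the `overload` and `get_overloads` defined above: stub definitions
# are registered per (module, qualname) and can later be retrieved from the
# implementation function.
def _example_overload_registry():
    @overload
    def parse(value: int) -> int: ...
    @overload
    def parse(value: str) -> str: ...
    def parse(value):
        return value

    # Two stub signatures were recorded for this qualname.
    assert len(get_overloads(parse)) == 2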
358 |
-
|
359 |
-
|
360 |
-
# This is not a real generic class. Don't use outside annotations.
|
361 |
-
Type = typing.Type
|
362 |
-
|
363 |
-
# Various ABCs mimicking those in collections.abc.
|
364 |
-
# A few are simply re-exported for completeness.
|
365 |
-
|
366 |
-
|
367 |
-
Awaitable = typing.Awaitable
|
368 |
-
Coroutine = typing.Coroutine
|
369 |
-
AsyncIterable = typing.AsyncIterable
|
370 |
-
AsyncIterator = typing.AsyncIterator
|
371 |
-
Deque = typing.Deque
|
372 |
-
ContextManager = typing.ContextManager
|
373 |
-
AsyncContextManager = typing.AsyncContextManager
|
374 |
-
DefaultDict = typing.DefaultDict
|
375 |
-
|
376 |
-
# 3.7.2+
|
377 |
-
if hasattr(typing, 'OrderedDict'):
|
378 |
-
OrderedDict = typing.OrderedDict
|
379 |
-
# 3.7.0-3.7.2
|
380 |
-
else:
|
381 |
-
OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
|
382 |
-
|
383 |
-
Counter = typing.Counter
|
384 |
-
ChainMap = typing.ChainMap
|
385 |
-
AsyncGenerator = typing.AsyncGenerator
|
386 |
-
NewType = typing.NewType
|
387 |
-
Text = typing.Text
|
388 |
-
TYPE_CHECKING = typing.TYPE_CHECKING
|
389 |
-
|
390 |
-
|
391 |
-
_PROTO_WHITELIST = ['Callable', 'Awaitable',
|
392 |
-
'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
|
393 |
-
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
|
394 |
-
'ContextManager', 'AsyncContextManager']
|
395 |
-
|
396 |
-
|
397 |
-
def _get_protocol_attrs(cls):
|
398 |
-
attrs = set()
|
399 |
-
for base in cls.__mro__[:-1]: # without object
|
400 |
-
if base.__name__ in ('Protocol', 'Generic'):
|
401 |
-
continue
|
402 |
-
annotations = getattr(base, '__annotations__', {})
|
403 |
-
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
|
404 |
-
if (not attr.startswith('_abc_') and attr not in (
|
405 |
-
'__abstractmethods__', '__annotations__', '__weakref__',
|
406 |
-
'_is_protocol', '_is_runtime_protocol', '__dict__',
|
407 |
-
'__args__', '__slots__',
|
408 |
-
'__next_in_mro__', '__parameters__', '__origin__',
|
409 |
-
'__orig_bases__', '__extra__', '__tree_hash__',
|
410 |
-
'__doc__', '__subclasshook__', '__init__', '__new__',
|
411 |
-
'__module__', '_MutableMapping__marker', '_gorg')):
|
412 |
-
attrs.add(attr)
|
413 |
-
return attrs
|
414 |
-
|
415 |
-
|
416 |
-
def _is_callable_members_only(cls):
|
417 |
-
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
|
418 |
-
|
419 |
-
|
420 |
-
def _maybe_adjust_parameters(cls):
|
421 |
-
"""Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__.
|
422 |
-
|
423 |
-
The contents of this function are very similar
|
424 |
-
to logic found in typing.Generic.__init_subclass__
|
425 |
-
on the CPython main branch.
|
426 |
-
"""
|
427 |
-
tvars = []
|
428 |
-
if '__orig_bases__' in cls.__dict__:
|
429 |
-
tvars = typing._collect_type_vars(cls.__orig_bases__)
|
430 |
-
# Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
|
431 |
-
# If found, tvars must be a subset of it.
|
432 |
-
# If not found, tvars is it.
|
433 |
-
# Also check for and reject plain Generic,
|
434 |
-
# and reject multiple Generic[...] and/or Protocol[...].
|
435 |
-
gvars = None
|
436 |
-
for base in cls.__orig_bases__:
|
437 |
-
if (isinstance(base, typing._GenericAlias) and
|
438 |
-
base.__origin__ in (typing.Generic, Protocol)):
|
439 |
-
# for error messages
|
440 |
-
the_base = base.__origin__.__name__
|
441 |
-
if gvars is not None:
|
442 |
-
raise TypeError(
|
443 |
-
"Cannot inherit from Generic[...]"
|
444 |
-
" and/or Protocol[...] multiple types.")
|
445 |
-
gvars = base.__parameters__
|
446 |
-
if gvars is None:
|
447 |
-
gvars = tvars
|
448 |
-
else:
|
449 |
-
tvarset = set(tvars)
|
450 |
-
gvarset = set(gvars)
|
451 |
-
if not tvarset <= gvarset:
|
452 |
-
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
|
453 |
-
s_args = ', '.join(str(g) for g in gvars)
|
454 |
-
raise TypeError(f"Some type variables ({s_vars}) are"
|
455 |
-
f" not listed in {the_base}[{s_args}]")
|
456 |
-
tvars = gvars
|
457 |
-
cls.__parameters__ = tuple(tvars)
|
458 |
-
|
459 |
-
|
460 |
-
# 3.8+
|
461 |
-
if hasattr(typing, 'Protocol'):
|
462 |
-
Protocol = typing.Protocol
|
463 |
-
# 3.7
|
464 |
-
else:
|
465 |
-
|
466 |
-
def _no_init(self, *args, **kwargs):
|
467 |
-
if type(self)._is_protocol:
|
468 |
-
raise TypeError('Protocols cannot be instantiated')
|
469 |
-
|
470 |
-
class _ProtocolMeta(abc.ABCMeta): # noqa: B024
|
471 |
-
# This metaclass is a bit unfortunate and exists only because of the lack
|
472 |
-
# of __instancehook__.
|
473 |
-
def __instancecheck__(cls, instance):
|
474 |
-
# We need this method for situations where attributes are
|
475 |
-
# assigned in __init__.
|
476 |
-
if ((not getattr(cls, '_is_protocol', False) or
|
477 |
-
_is_callable_members_only(cls)) and
|
478 |
-
issubclass(instance.__class__, cls)):
|
479 |
-
return True
|
480 |
-
if cls._is_protocol:
|
481 |
-
if all(hasattr(instance, attr) and
|
482 |
-
(not callable(getattr(cls, attr, None)) or
|
483 |
-
getattr(instance, attr) is not None)
|
484 |
-
for attr in _get_protocol_attrs(cls)):
|
485 |
-
return True
|
486 |
-
return super().__instancecheck__(instance)
|
487 |
-
|
488 |
-
class Protocol(metaclass=_ProtocolMeta):
|
489 |
-
# There is quite a lot of overlapping code with typing.Generic.
|
490 |
-
# Unfortunately it is hard to avoid this while these live in two different
|
491 |
-
# modules. The duplicated code will be removed when Protocol is moved to typing.
|
492 |
-
"""Base class for protocol classes. Protocol classes are defined as::
|
493 |
-
|
494 |
-
class Proto(Protocol):
|
495 |
-
def meth(self) -> int:
|
496 |
-
...
|
497 |
-
|
498 |
-
Such classes are primarily used with static type checkers that recognize
|
499 |
-
structural subtyping (static duck-typing), for example::
|
500 |
-
|
501 |
-
class C:
|
502 |
-
def meth(self) -> int:
|
503 |
-
return 0
|
504 |
-
|
505 |
-
def func(x: Proto) -> int:
|
506 |
-
return x.meth()
|
507 |
-
|
508 |
-
func(C()) # Passes static type check
|
509 |
-
|
510 |
-
See PEP 544 for details. Protocol classes decorated with
|
511 |
-
@typing_extensions.runtime act as simple-minded runtime protocols that check
|
512 |
-
only the presence of given attributes, ignoring their type signatures.
|
513 |
-
|
514 |
-
Protocol classes can be generic; they are defined as::
|
515 |
-
|
516 |
-
class GenProto(Protocol[T]):
|
517 |
-
def meth(self) -> T:
|
518 |
-
...
|
519 |
-
"""
|
520 |
-
__slots__ = ()
|
521 |
-
_is_protocol = True
|
522 |
-
|
523 |
-
def __new__(cls, *args, **kwds):
|
524 |
-
if cls is Protocol:
|
525 |
-
raise TypeError("Type Protocol cannot be instantiated; "
|
526 |
-
"it can only be used as a base class")
|
527 |
-
return super().__new__(cls)
|
528 |
-
|
529 |
-
@typing._tp_cache
|
530 |
-
def __class_getitem__(cls, params):
|
531 |
-
if not isinstance(params, tuple):
|
532 |
-
params = (params,)
|
533 |
-
if not params and cls is not typing.Tuple:
|
534 |
-
raise TypeError(
|
535 |
-
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
|
536 |
-
msg = "Parameters to generic types must be types."
|
537 |
-
params = tuple(typing._type_check(p, msg) for p in params) # noqa
|
538 |
-
if cls is Protocol:
|
539 |
-
# Generic can only be subscripted with unique type variables.
|
540 |
-
if not all(isinstance(p, typing.TypeVar) for p in params):
|
541 |
-
i = 0
|
542 |
-
while isinstance(params[i], typing.TypeVar):
|
543 |
-
i += 1
|
544 |
-
raise TypeError(
|
545 |
-
"Parameters to Protocol[...] must all be type variables."
|
546 |
-
f" Parameter {i + 1} is {params[i]}")
|
547 |
-
if len(set(params)) != len(params):
|
548 |
-
raise TypeError(
|
549 |
-
"Parameters to Protocol[...] must all be unique")
|
550 |
-
else:
|
551 |
-
# Subscripting a regular Generic subclass.
|
552 |
-
_check_generic(cls, params, len(cls.__parameters__))
|
553 |
-
return typing._GenericAlias(cls, params)
|
554 |
-
|
555 |
-
def __init_subclass__(cls, *args, **kwargs):
|
556 |
-
if '__orig_bases__' in cls.__dict__:
|
557 |
-
error = typing.Generic in cls.__orig_bases__
|
558 |
-
else:
|
559 |
-
error = typing.Generic in cls.__bases__
|
560 |
-
if error:
|
561 |
-
raise TypeError("Cannot inherit from plain Generic")
|
562 |
-
_maybe_adjust_parameters(cls)
|
563 |
-
|
564 |
-
# Determine if this is a protocol or a concrete subclass.
|
565 |
-
if not cls.__dict__.get('_is_protocol', None):
|
566 |
-
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
|
567 |
-
|
568 |
-
# Set (or override) the protocol subclass hook.
|
569 |
-
def _proto_hook(other):
|
570 |
-
if not cls.__dict__.get('_is_protocol', None):
|
571 |
-
return NotImplemented
|
572 |
-
if not getattr(cls, '_is_runtime_protocol', False):
|
573 |
-
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
|
574 |
-
return NotImplemented
|
575 |
-
raise TypeError("Instance and class checks can only be used with"
|
576 |
-
" @runtime protocols")
|
577 |
-
if not _is_callable_members_only(cls):
|
578 |
-
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
|
579 |
-
return NotImplemented
|
580 |
-
raise TypeError("Protocols with non-method members"
|
581 |
-
" don't support issubclass()")
|
582 |
-
if not isinstance(other, type):
|
583 |
-
# Same error as for issubclass(1, int)
|
584 |
-
raise TypeError('issubclass() arg 1 must be a class')
|
585 |
-
for attr in _get_protocol_attrs(cls):
|
586 |
-
for base in other.__mro__:
|
587 |
-
if attr in base.__dict__:
|
588 |
-
if base.__dict__[attr] is None:
|
589 |
-
return NotImplemented
|
590 |
-
break
|
591 |
-
annotations = getattr(base, '__annotations__', {})
|
592 |
-
if (isinstance(annotations, typing.Mapping) and
|
593 |
-
attr in annotations and
|
594 |
-
isinstance(other, _ProtocolMeta) and
|
595 |
-
other._is_protocol):
|
596 |
-
break
|
597 |
-
else:
|
598 |
-
return NotImplemented
|
599 |
-
return True
|
600 |
-
if '__subclasshook__' not in cls.__dict__:
|
601 |
-
cls.__subclasshook__ = _proto_hook
|
602 |
-
|
603 |
-
# We have nothing more to do for non-protocols.
|
604 |
-
if not cls._is_protocol:
|
605 |
-
return
|
606 |
-
|
607 |
-
# Check consistency of bases.
|
608 |
-
for base in cls.__bases__:
|
609 |
-
if not (base in (object, typing.Generic) or
|
610 |
-
base.__module__ == 'collections.abc' and
|
611 |
-
base.__name__ in _PROTO_WHITELIST or
|
612 |
-
isinstance(base, _ProtocolMeta) and base._is_protocol):
|
613 |
-
raise TypeError('Protocols can only inherit from other'
|
614 |
-
f' protocols, got {repr(base)}')
|
615 |
-
cls.__init__ = _no_init
|
616 |
-
|
617 |
-
|
618 |
-
# 3.8+
|
619 |
-
if hasattr(typing, 'runtime_checkable'):
|
620 |
-
runtime_checkable = typing.runtime_checkable
|
621 |
-
# 3.7
|
622 |
-
else:
|
623 |
-
def runtime_checkable(cls):
|
624 |
-
"""Mark a protocol class as a runtime protocol, so that it
|
625 |
-
can be used with isinstance() and issubclass(). Raise TypeError
|
626 |
-
if applied to a non-protocol class.
|
627 |
-
|
628 |
-
This allows a simple-minded structural check very similar to the
|
629 |
-
one-offs in collections.abc such as Hashable.
|
630 |
-
"""
|
631 |
-
if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
|
632 |
-
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
|
633 |
-
f' got {cls!r}')
|
634 |
-
cls._is_runtime_protocol = True
|
635 |
-
return cls
|
636 |
-
|
637 |
-
|
638 |
-
# Exists for backwards compatibility.
|
639 |
-
runtime = runtime_checkable
|
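# Minimal usage sketch (illustrative helper, not part of this module),
# assuming the `Protocol` and `runtime_checkable` defined above: a
# runtime-checkable protocol lets isinstance() perform a structural,
# attribute-presence check only.
def _example_runtime_protocol():
    @runtime_checkable
    class SupportsClose(Protocol):
        def close(self) -> None: ...

    import io
    assert isinstance(io.StringIO(), SupportsClose)   # has a close() method
    assert not isinstance(42, SupportsClose)          # ints do not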
640 |
-
|
641 |
-
|
642 |
-
# 3.8+
|
643 |
-
if hasattr(typing, 'SupportsIndex'):
|
644 |
-
SupportsIndex = typing.SupportsIndex
|
645 |
-
# 3.7
|
646 |
-
else:
|
647 |
-
@runtime_checkable
|
648 |
-
class SupportsIndex(Protocol):
|
649 |
-
__slots__ = ()
|
650 |
-
|
651 |
-
@abc.abstractmethod
|
652 |
-
def __index__(self) -> int:
|
653 |
-
pass
|
654 |
-
|
655 |
-
|
656 |
-
if hasattr(typing, "Required"):
|
657 |
-
# The standard library TypedDict in Python 3.8 does not store runtime information
|
658 |
-
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
|
659 |
-
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
|
660 |
-
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
|
661 |
-
# The standard library TypedDict below Python 3.11 does not store runtime
|
662 |
-
# information about optional and required keys when using Required or NotRequired.
|
663 |
-
# Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
|
664 |
-
TypedDict = typing.TypedDict
|
665 |
-
_TypedDictMeta = typing._TypedDictMeta
|
666 |
-
is_typeddict = typing.is_typeddict
|
667 |
-
else:
|
668 |
-
def _check_fails(cls, other):
|
669 |
-
try:
|
670 |
-
if sys._getframe(1).f_globals['__name__'] not in ['abc',
|
671 |
-
'functools',
|
672 |
-
'typing']:
|
673 |
-
# Typed dicts are only for static structural subtyping.
|
674 |
-
raise TypeError('TypedDict does not support instance and class checks')
|
675 |
-
except (AttributeError, ValueError):
|
676 |
-
pass
|
677 |
-
return False
|
678 |
-
|
679 |
-
def _dict_new(*args, **kwargs):
|
680 |
-
if not args:
|
681 |
-
raise TypeError('TypedDict.__new__(): not enough arguments')
|
682 |
-
_, args = args[0], args[1:] # allow the "cls" keyword to be passed
|
683 |
-
return dict(*args, **kwargs)
|
684 |
-
|
685 |
-
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
|
686 |
-
|
687 |
-
def _typeddict_new(*args, total=True, **kwargs):
|
688 |
-
if not args:
|
689 |
-
raise TypeError('TypedDict.__new__(): not enough arguments')
|
690 |
-
_, args = args[0], args[1:] # allow the "cls" keyword to be passed
|
691 |
-
if args:
|
692 |
-
typename, args = args[0], args[1:] # allow the "_typename" keyword to be passed
|
693 |
-
elif '_typename' in kwargs:
|
694 |
-
typename = kwargs.pop('_typename')
|
695 |
-
import warnings
|
696 |
-
warnings.warn("Passing '_typename' as keyword argument is deprecated",
|
697 |
-
DeprecationWarning, stacklevel=2)
|
698 |
-
else:
|
699 |
-
raise TypeError("TypedDict.__new__() missing 1 required positional "
|
700 |
-
"argument: '_typename'")
|
701 |
-
if args:
|
702 |
-
try:
|
703 |
-
fields, = args # allow the "_fields" keyword to be passed
|
704 |
-
except ValueError:
|
705 |
-
raise TypeError('TypedDict.__new__() takes from 2 to 3 '
|
706 |
-
f'positional arguments but {len(args) + 2} '
|
707 |
-
'were given')
|
708 |
-
elif '_fields' in kwargs and len(kwargs) == 1:
|
709 |
-
fields = kwargs.pop('_fields')
|
710 |
-
import warnings
|
711 |
-
warnings.warn("Passing '_fields' as keyword argument is deprecated",
|
712 |
-
DeprecationWarning, stacklevel=2)
|
713 |
-
else:
|
714 |
-
fields = None
|
715 |
-
|
716 |
-
if fields is None:
|
717 |
-
fields = kwargs
|
718 |
-
elif kwargs:
|
719 |
-
raise TypeError("TypedDict takes either a dict or keyword arguments,"
|
720 |
-
" but not both")
|
721 |
-
|
722 |
-
ns = {'__annotations__': dict(fields)}
|
723 |
-
try:
|
724 |
-
# Setting correct module is necessary to make typed dict classes pickleable.
|
725 |
-
ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
|
726 |
-
except (AttributeError, ValueError):
|
727 |
-
pass
|
728 |
-
|
729 |
-
return _TypedDictMeta(typename, (), ns, total=total)
|
730 |
-
|
731 |
-
_typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
|
732 |
-
' /, *, total=True, **kwargs)')
|
733 |
-
|
734 |
-
_TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters
|
735 |
-
|
736 |
-
class _TypedDictMeta(type):
|
737 |
-
def __init__(cls, name, bases, ns, total=True):
|
738 |
-
super().__init__(name, bases, ns)
|
739 |
-
|
740 |
-
def __new__(cls, name, bases, ns, total=True):
|
741 |
-
# Create new typed dict class object.
|
742 |
-
# This method is called directly when TypedDict is subclassed,
|
743 |
-
# or via _typeddict_new when TypedDict is instantiated. This way
|
744 |
-
# TypedDict supports all three syntaxes described in its docstring.
|
745 |
-
# Subclasses and instances of TypedDict return actual dictionaries
|
746 |
-
# via _dict_new.
|
747 |
-
ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
|
748 |
-
# Don't insert typing.Generic into __bases__ here,
|
749 |
-
# or Generic.__init_subclass__ will raise TypeError
|
750 |
-
# in the super().__new__() call.
|
751 |
-
# Instead, monkey-patch __bases__ onto the class after it's been created.
|
752 |
-
tp_dict = super().__new__(cls, name, (dict,), ns)
|
753 |
-
|
754 |
-
if any(issubclass(base, typing.Generic) for base in bases):
|
755 |
-
tp_dict.__bases__ = (typing.Generic, dict)
|
756 |
-
_maybe_adjust_parameters(tp_dict)
|
757 |
-
|
758 |
-
annotations = {}
|
759 |
-
own_annotations = ns.get('__annotations__', {})
|
760 |
-
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
|
761 |
-
kwds = {"module": tp_dict.__module__} if _TAKES_MODULE else {}
|
762 |
-
own_annotations = {
|
763 |
-
n: typing._type_check(tp, msg, **kwds)
|
764 |
-
for n, tp in own_annotations.items()
|
765 |
-
}
|
766 |
-
required_keys = set()
|
767 |
-
optional_keys = set()
|
768 |
-
|
769 |
-
for base in bases:
|
770 |
-
annotations.update(base.__dict__.get('__annotations__', {}))
|
771 |
-
required_keys.update(base.__dict__.get('__required_keys__', ()))
|
772 |
-
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
|
773 |
-
|
774 |
-
annotations.update(own_annotations)
|
775 |
-
for annotation_key, annotation_type in own_annotations.items():
|
776 |
-
annotation_origin = get_origin(annotation_type)
|
777 |
-
if annotation_origin is Annotated:
|
778 |
-
annotation_args = get_args(annotation_type)
|
779 |
-
if annotation_args:
|
780 |
-
annotation_type = annotation_args[0]
|
781 |
-
annotation_origin = get_origin(annotation_type)
|
782 |
-
|
783 |
-
if annotation_origin is Required:
|
784 |
-
required_keys.add(annotation_key)
|
785 |
-
elif annotation_origin is NotRequired:
|
786 |
-
optional_keys.add(annotation_key)
|
787 |
-
elif total:
|
788 |
-
required_keys.add(annotation_key)
|
789 |
-
else:
|
790 |
-
optional_keys.add(annotation_key)
|
791 |
-
|
792 |
-
tp_dict.__annotations__ = annotations
|
793 |
-
tp_dict.__required_keys__ = frozenset(required_keys)
|
794 |
-
tp_dict.__optional_keys__ = frozenset(optional_keys)
|
795 |
-
if not hasattr(tp_dict, '__total__'):
|
796 |
-
tp_dict.__total__ = total
|
797 |
-
return tp_dict
|
798 |
-
|
799 |
-
__instancecheck__ = __subclasscheck__ = _check_fails
|
800 |
-
|
801 |
-
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
|
802 |
-
TypedDict.__module__ = __name__
|
803 |
-
TypedDict.__doc__ = \
|
804 |
-
"""A simple typed name space. At runtime it is equivalent to a plain dict.
|
805 |
-
|
806 |
-
TypedDict creates a dictionary type that expects all of its
|
807 |
-
instances to have a certain set of keys, with each key
|
808 |
-
associated with a value of a consistent type. This expectation
|
809 |
-
is not checked at runtime but is only enforced by type checkers.
|
810 |
-
Usage::
|
811 |
-
|
812 |
-
class Point2D(TypedDict):
|
813 |
-
x: int
|
814 |
-
y: int
|
815 |
-
label: str
|
816 |
-
|
817 |
-
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
|
818 |
-
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
|
819 |
-
|
820 |
-
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
|
821 |
-
|
822 |
-
The type info can be accessed via the Point2D.__annotations__ dict, and
|
823 |
-
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
|
824 |
-
TypedDict supports two additional equivalent forms::
|
825 |
-
|
826 |
-
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
|
827 |
-
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
|
828 |
-
|
829 |
-
The class syntax is only supported in Python 3.6+, while the other two
|
830 |
-
syntax forms work for Python 2.7 and 3.2+.
|
831 |
-
"""
|
832 |
-
|
833 |
-
if hasattr(typing, "_TypedDictMeta"):
|
834 |
-
_TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
|
835 |
-
else:
|
836 |
-
_TYPEDDICT_TYPES = (_TypedDictMeta,)
|
837 |
-
|
838 |
-
def is_typeddict(tp):
|
839 |
-
"""Check if an annotation is a TypedDict class
|
840 |
-
|
841 |
-
For example::
|
842 |
-
class Film(TypedDict):
|
843 |
-
title: str
|
844 |
-
year: int
|
845 |
-
|
846 |
-
is_typeddict(Film) # => True
|
847 |
-
is_typeddict(Union[list, str]) # => False
|
848 |
-
"""
|
849 |
-
return isinstance(tp, tuple(_TYPEDDICT_TYPES))
|
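# Minimal usage sketch (illustrative helper, not part of this module),
# assuming the `TypedDict` and `is_typeddict` defined above: at runtime
# instances are plain dicts, while __required_keys__/__optional_keys__
# record the schema for introspection.
def _example_typeddict_usage():
    class Movie(TypedDict, total=False):
        title: str
        year: int

    m: Movie = {"title": "Blade Runner"}   # just a dict at runtime
    assert isinstance(m, dict)
    assert Movie.__optional_keys__ == frozenset({"title", "year"})
    assert Movie.__required_keys__ == frozenset()
    assert is_typeddict(Movie)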
850 |
-
|
851 |
-
|
852 |
-
if hasattr(typing, "assert_type"):
|
853 |
-
assert_type = typing.assert_type
|
854 |
-
|
855 |
-
else:
|
856 |
-
def assert_type(__val, __typ):
|
857 |
-
"""Assert (to the type checker) that the value is of the given type.
|
858 |
-
|
859 |
-
When the type checker encounters a call to assert_type(), it
|
860 |
-
emits an error if the value is not of the specified type::
|
861 |
-
|
862 |
-
def greet(name: str) -> None:
|
863 |
-
assert_type(name, str) # ok
|
864 |
-
assert_type(name, int) # type checker error
|
865 |
-
|
866 |
-
At runtime this returns the first argument unchanged and otherwise
|
867 |
-
does nothing.
|
868 |
-
"""
|
869 |
-
return __val
|
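# Minimal usage sketch (illustrative helper, not part of this module),
# assuming the `assert_type` defined above: purely a claim for static type
# checkers; at runtime the value passes through unchanged.
def _example_assert_type():
    name = "world"
    same = assert_type(name, str)   # a checker verifies the type; runtime is a no-op
    assert same is name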
870 |
-
|
871 |
-
|
872 |
-
if hasattr(typing, "Required"):
|
873 |
-
get_type_hints = typing.get_type_hints
|
874 |
-
else:
|
875 |
-
import functools
|
876 |
-
import types
|
877 |
-
|
878 |
-
# replaces _strip_annotations()
|
879 |
-
def _strip_extras(t):
|
880 |
-
"""Strips Annotated, Required and NotRequired from a given type."""
|
881 |
-
if isinstance(t, _AnnotatedAlias):
|
882 |
-
return _strip_extras(t.__origin__)
|
883 |
-
if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
|
884 |
-
return _strip_extras(t.__args__[0])
|
885 |
-
if isinstance(t, typing._GenericAlias):
|
886 |
-
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
|
887 |
-
if stripped_args == t.__args__:
|
888 |
-
return t
|
889 |
-
return t.copy_with(stripped_args)
|
890 |
-
if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias):
|
891 |
-
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
|
892 |
-
if stripped_args == t.__args__:
|
893 |
-
return t
|
894 |
-
return types.GenericAlias(t.__origin__, stripped_args)
|
895 |
-
if hasattr(types, "UnionType") and isinstance(t, types.UnionType):
|
896 |
-
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
|
897 |
-
if stripped_args == t.__args__:
|
898 |
-
return t
|
899 |
-
return functools.reduce(operator.or_, stripped_args)
|
900 |
-
|
901 |
-
return t
|
902 |
-
|
903 |
-
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
|
904 |
-
"""Return type hints for an object.
|
905 |
-
|
906 |
-
This is often the same as obj.__annotations__, but it handles
|
907 |
-
forward references encoded as string literals, adds Optional[t] if a
|
908 |
-
default value equal to None is set and recursively replaces all
|
909 |
-
'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
|
910 |
-
(unless 'include_extras=True').
|
911 |
-
|
912 |
-
The argument may be a module, class, method, or function. The annotations
|
913 |
-
are returned as a dictionary. For classes, annotations include also
|
914 |
-
inherited members.
|
915 |
-
|
916 |
-
TypeError is raised if the argument is not of a type that can contain
|
917 |
-
annotations, and an empty dictionary is returned if no annotations are
|
918 |
-
present.
|
919 |
-
|
920 |
-
BEWARE -- the behavior of globalns and localns is counterintuitive
|
921 |
-
(unless you are familiar with how eval() and exec() work). The
|
922 |
-
search order is locals first, then globals.
|
923 |
-
|
924 |
-
- If no dict arguments are passed, an attempt is made to use the
|
925 |
-
globals from obj (or the respective module's globals for classes),
|
926 |
-
and these are also used as the locals. If the object does not appear
|
927 |
-
to have globals, an empty dictionary is used.
|
928 |
-
|
929 |
-
- If one dict argument is passed, it is used for both globals and
|
930 |
-
locals.
|
931 |
-
|
932 |
-
- If two dict arguments are passed, they specify globals and
|
933 |
-
locals, respectively.
|
934 |
-
"""
|
935 |
-
if hasattr(typing, "Annotated"):
|
936 |
-
hint = typing.get_type_hints(
|
937 |
-
obj, globalns=globalns, localns=localns, include_extras=True
|
938 |
-
)
|
939 |
-
else:
|
940 |
-
hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
|
941 |
-
if include_extras:
|
942 |
-
return hint
|
943 |
-
return {k: _strip_extras(t) for k, t in hint.items()}
|
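# Minimal usage sketch (illustrative helper, not part of this module),
# assuming the `get_type_hints` defined above. `Annotated` and `get_args`
# are defined further down in this module; the inner function is only
# introspected, never called at import time.
def _example_get_type_hints():
    def greet(name: "Annotated[str, 'display name']") -> None: ...

    assert get_type_hints(greet)["name"] is str                 # extras stripped
    hints = get_type_hints(greet, include_extras=True)
    assert get_args(hints["name"]) == (str, "display name")     # extras kept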
944 |
-
|
945 |
-
|
946 |
-
# Python 3.9+ has PEP 593 (Annotated)
|
947 |
-
if hasattr(typing, 'Annotated'):
|
948 |
-
Annotated = typing.Annotated
|
949 |
-
# Not exported and not a public API, but needed for get_origin() and get_args()
|
950 |
-
# to work.
|
951 |
-
_AnnotatedAlias = typing._AnnotatedAlias
|
952 |
-
# 3.7-3.8
|
953 |
-
else:
|
954 |
-
class _AnnotatedAlias(typing._GenericAlias, _root=True):
|
955 |
-
"""Runtime representation of an annotated type.
|
956 |
-
|
957 |
-
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
|
958 |
-
with extra annotations. The alias behaves like a normal typing alias,
|
959 |
-
instantiating is the same as instantiating the underlying type, binding
|
960 |
-
it to types is also the same.
|
961 |
-
"""
|
962 |
-
def __init__(self, origin, metadata):
|
963 |
-
if isinstance(origin, _AnnotatedAlias):
|
964 |
-
metadata = origin.__metadata__ + metadata
|
965 |
-
origin = origin.__origin__
|
966 |
-
super().__init__(origin, origin)
|
967 |
-
self.__metadata__ = metadata
|
968 |
-
|
969 |
-
def copy_with(self, params):
|
970 |
-
assert len(params) == 1
|
971 |
-
new_type = params[0]
|
972 |
-
return _AnnotatedAlias(new_type, self.__metadata__)
|
973 |
-
|
974 |
-
def __repr__(self):
|
975 |
-
return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
|
976 |
-
f"{', '.join(repr(a) for a in self.__metadata__)}]")
|
977 |
-
|
978 |
-
def __reduce__(self):
|
979 |
-
return operator.getitem, (
|
980 |
-
Annotated, (self.__origin__,) + self.__metadata__
|
981 |
-
)
|
982 |
-
|
983 |
-
def __eq__(self, other):
|
984 |
-
if not isinstance(other, _AnnotatedAlias):
|
985 |
-
return NotImplemented
|
986 |
-
if self.__origin__ != other.__origin__:
|
987 |
-
return False
|
988 |
-
return self.__metadata__ == other.__metadata__
|
989 |
-
|
990 |
-
def __hash__(self):
|
991 |
-
return hash((self.__origin__, self.__metadata__))
|
992 |
-
|
993 |
-
class Annotated:
|
994 |
-
"""Add context specific metadata to a type.
|
995 |
-
|
996 |
-
Example: Annotated[int, runtime_check.Unsigned] indicates to the
|
997 |
-
hypothetical runtime_check module that this type is an unsigned int.
|
998 |
-
Every other consumer of this type can ignore this metadata and treat
|
999 |
-
this type as int.
|
1000 |
-
|
1001 |
-
The first argument to Annotated must be a valid type (and will be in
|
1002 |
-
the __origin__ field), the remaining arguments are kept as a tuple in
|
1003 |
-
the __metadata__ field.
|
1004 |
-
|
1005 |
-
Details:
|
1006 |
-
|
1007 |
-
- It's an error to call `Annotated` with less than two arguments.
|
1008 |
-
- Nested Annotated are flattened::
|
1009 |
-
|
1010 |
-
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
|
1011 |
-
|
1012 |
-
- Instantiating an annotated type is equivalent to instantiating the
|
1013 |
-
underlying type::
|
1014 |
-
|
1015 |
-
Annotated[C, Ann1](5) == C(5)
|
1016 |
-
|
1017 |
-
- Annotated can be used as a generic type alias::
|
1018 |
-
|
1019 |
-
Optimized = Annotated[T, runtime.Optimize()]
|
1020 |
-
Optimized[int] == Annotated[int, runtime.Optimize()]
|
1021 |
-
|
1022 |
-
OptimizedList = Annotated[List[T], runtime.Optimize()]
|
1023 |
-
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
|
1024 |
-
"""
|
1025 |
-
|
1026 |
-
__slots__ = ()
|
1027 |
-
|
1028 |
-
def __new__(cls, *args, **kwargs):
|
1029 |
-
raise TypeError("Type Annotated cannot be instantiated.")
|
1030 |
-
|
1031 |
-
@typing._tp_cache
|
1032 |
-
def __class_getitem__(cls, params):
|
1033 |
-
if not isinstance(params, tuple) or len(params) < 2:
|
1034 |
-
raise TypeError("Annotated[...] should be used "
|
1035 |
-
"with at least two arguments (a type and an "
|
1036 |
-
"annotation).")
|
1037 |
-
allowed_special_forms = (ClassVar, Final)
|
1038 |
-
if get_origin(params[0]) in allowed_special_forms:
|
1039 |
-
origin = params[0]
|
1040 |
-
else:
|
1041 |
-
msg = "Annotated[t, ...]: t must be a type."
|
1042 |
-
origin = typing._type_check(params[0], msg)
|
1043 |
-
metadata = tuple(params[1:])
|
1044 |
-
return _AnnotatedAlias(origin, metadata)
|
1045 |
-
|
1046 |
-
def __init_subclass__(cls, *args, **kwargs):
|
1047 |
-
raise TypeError(
|
1048 |
-
f"Cannot subclass {cls.__module__}.Annotated"
|
1049 |
-
)
|
1050 |
-
|
1051 |
-
# Python 3.8 has get_origin() and get_args() but those implementations aren't
|
1052 |
-
# Annotated-aware, so we can't use those. Python 3.9's versions don't support
|
1053 |
-
# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
|
1054 |
-
if sys.version_info[:2] >= (3, 10):
|
1055 |
-
get_origin = typing.get_origin
|
1056 |
-
get_args = typing.get_args
|
1057 |
-
# 3.7-3.9
|
1058 |
-
else:
|
1059 |
-
try:
|
1060 |
-
# 3.9+
|
1061 |
-
from typing import _BaseGenericAlias
|
1062 |
-
except ImportError:
|
1063 |
-
_BaseGenericAlias = typing._GenericAlias
|
1064 |
-
try:
|
1065 |
-
# 3.9+
|
1066 |
-
from typing import GenericAlias as _typing_GenericAlias
|
1067 |
-
except ImportError:
|
1068 |
-
_typing_GenericAlias = typing._GenericAlias
|
1069 |
-
|
1070 |
-
def get_origin(tp):
|
1071 |
-
"""Get the unsubscripted version of a type.
|
1072 |
-
|
1073 |
-
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
|
1074 |
-
and Annotated. Return None for unsupported types. Examples::
|
1075 |
-
|
1076 |
-
get_origin(Literal[42]) is Literal
|
1077 |
-
get_origin(int) is None
|
1078 |
-
get_origin(ClassVar[int]) is ClassVar
|
1079 |
-
get_origin(Generic) is Generic
|
1080 |
-
get_origin(Generic[T]) is Generic
|
1081 |
-
get_origin(Union[T, int]) is Union
|
1082 |
-
get_origin(List[Tuple[T, T]][int]) == list
|
1083 |
-
get_origin(P.args) is P
|
1084 |
-
"""
|
1085 |
-
if isinstance(tp, _AnnotatedAlias):
|
1086 |
-
return Annotated
|
1087 |
-
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
|
1088 |
-
ParamSpecArgs, ParamSpecKwargs)):
|
1089 |
-
return tp.__origin__
|
1090 |
-
if tp is typing.Generic:
|
1091 |
-
return typing.Generic
|
1092 |
-
return None
|
1093 |
-
|
1094 |
-
def get_args(tp):
|
1095 |
-
"""Get type arguments with all substitutions performed.
|
1096 |
-
|
1097 |
-
For unions, basic simplifications used by Union constructor are performed.
|
1098 |
-
Examples::
|
1099 |
-
get_args(Dict[str, int]) == (str, int)
|
1100 |
-
get_args(int) == ()
|
1101 |
-
get_args(Union[int, Union[T, int], str][int]) == (int, str)
|
1102 |
-
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
|
1103 |
-
get_args(Callable[[], T][int]) == ([], int)
|
1104 |
-
"""
|
1105 |
-
if isinstance(tp, _AnnotatedAlias):
|
1106 |
-
return (tp.__origin__,) + tp.__metadata__
|
1107 |
-
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
|
1108 |
-
if getattr(tp, "_special", False):
|
1109 |
-
return ()
|
1110 |
-
res = tp.__args__
|
1111 |
-
if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
|
1112 |
-
res = (list(res[:-1]), res[-1])
|
1113 |
-
return res
|
1114 |
-
return ()
|
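# Minimal introspection sketch (illustrative helper, not part of this
# module), assuming the `get_origin`/`get_args` defined above.
def _example_get_origin_and_args():
    assert get_origin(typing.List[int]) is list
    assert get_args(typing.Dict[str, int]) == (str, int)
    assert get_origin(Annotated[int, "meta"]) is Annotated
    assert get_args(Annotated[int, "meta"]) == (int, "meta")
    assert get_origin(int) is None and get_args(int) == ()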
1115 |
-
|
1116 |
-
|
1117 |
-
# 3.10+
|
1118 |
-
if hasattr(typing, 'TypeAlias'):
|
1119 |
-
TypeAlias = typing.TypeAlias
|
1120 |
-
# 3.9
|
1121 |
-
elif sys.version_info[:2] >= (3, 9):
|
1122 |
-
class _TypeAliasForm(typing._SpecialForm, _root=True):
|
1123 |
-
def __repr__(self):
|
1124 |
-
return 'typing_extensions.' + self._name
|
1125 |
-
|
1126 |
-
@_TypeAliasForm
|
1127 |
-
def TypeAlias(self, parameters):
|
1128 |
-
"""Special marker indicating that an assignment should
|
1129 |
-
be recognized as a proper type alias definition by type
|
1130 |
-
checkers.
|
1131 |
-
|
1132 |
-
For example::
|
1133 |
-
|
1134 |
-
Predicate: TypeAlias = Callable[..., bool]
|
1135 |
-
|
1136 |
-
It's invalid when used anywhere except as in the example above.
|
1137 |
-
"""
|
1138 |
-
raise TypeError(f"{self} is not subscriptable")
|
1139 |
-
# 3.7-3.8
|
1140 |
-
else:
|
1141 |
-
class _TypeAliasForm(typing._SpecialForm, _root=True):
|
1142 |
-
def __repr__(self):
|
1143 |
-
return 'typing_extensions.' + self._name
|
1144 |
-
|
1145 |
-
TypeAlias = _TypeAliasForm('TypeAlias',
|
1146 |
-
doc="""Special marker indicating that an assignment should
|
1147 |
-
be recognized as a proper type alias definition by type
|
1148 |
-
checkers.
|
1149 |
-
|
1150 |
-
For example::
|
1151 |
-
|
1152 |
-
Predicate: TypeAlias = Callable[..., bool]
|
1153 |
-
|
1154 |
-
It's invalid when used anywhere except as in the example
|
1155 |
-
above.""")
|
1156 |
-
|
1157 |
-
|
1158 |
-
class _DefaultMixin:
|
1159 |
-
"""Mixin for TypeVarLike defaults."""
|
1160 |
-
|
1161 |
-
__slots__ = ()
|
1162 |
-
|
1163 |
-
def __init__(self, default):
|
1164 |
-
if isinstance(default, (tuple, list)):
|
1165 |
-
self.__default__ = tuple((typing._type_check(d, "Default must be a type")
|
1166 |
-
for d in default))
|
1167 |
-
elif default != _marker:
|
1168 |
-
self.__default__ = typing._type_check(default, "Default must be a type")
|
1169 |
-
else:
|
1170 |
-
self.__default__ = None
|
1171 |
-
|
1172 |
-
|
1173 |
-
# Add default and infer_variance parameters from PEP 696 and 695
|
1174 |
-
class TypeVar(typing.TypeVar, _DefaultMixin, _root=True):
|
1175 |
-
"""Type variable."""
|
1176 |
-
|
1177 |
-
__module__ = 'typing'
|
1178 |
-
|
1179 |
-
def __init__(self, name, *constraints, bound=None,
|
1180 |
-
covariant=False, contravariant=False,
|
1181 |
-
default=_marker, infer_variance=False):
|
1182 |
-
super().__init__(name, *constraints, bound=bound, covariant=covariant,
|
1183 |
-
contravariant=contravariant)
|
1184 |
-
_DefaultMixin.__init__(self, default)
|
1185 |
-
self.__infer_variance__ = infer_variance
|
1186 |
-
|
1187 |
-
# for pickling:
|
1188 |
-
try:
|
1189 |
-
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
|
1190 |
-
except (AttributeError, ValueError):
|
1191 |
-
def_mod = None
|
1192 |
-
if def_mod != 'typing_extensions':
|
1193 |
-
self.__module__ = def_mod
|
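# Minimal usage sketch (illustrative helper, not part of this module),
# assuming the `TypeVar` defined above: the PEP 696 `default` argument is
# stored on the variable as `__default__` for later introspection.
def _example_typevar_default():
    T_with_default = TypeVar("T_with_default", default=int)
    assert T_with_default.__default__ is int
    assert T_with_default.__infer_variance__ is False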
1194 |
-
|
1195 |
-
|
1196 |
-
# Python 3.10+ has PEP 612
|
1197 |
-
if hasattr(typing, 'ParamSpecArgs'):
|
1198 |
-
ParamSpecArgs = typing.ParamSpecArgs
|
1199 |
-
ParamSpecKwargs = typing.ParamSpecKwargs
|
1200 |
-
# 3.7-3.9
|
1201 |
-
else:
|
1202 |
-
class _Immutable:
|
1203 |
-
"""Mixin to indicate that object should not be copied."""
|
1204 |
-
__slots__ = ()
|
1205 |
-
|
1206 |
-
def __copy__(self):
|
1207 |
-
return self
|
1208 |
-
|
1209 |
-
def __deepcopy__(self, memo):
|
1210 |
-
return self
|
1211 |
-
|
1212 |
-
class ParamSpecArgs(_Immutable):
|
1213 |
-
"""The args for a ParamSpec object.
|
1214 |
-
|
1215 |
-
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
|
1216 |
-
|
1217 |
-
ParamSpecArgs objects have a reference back to their ParamSpec:
|
1218 |
-
|
1219 |
-
P.args.__origin__ is P
|
1220 |
-
|
1221 |
-
This type is meant for runtime introspection and has no special meaning to
|
1222 |
-
static type checkers.
|
1223 |
-
"""
|
1224 |
-
def __init__(self, origin):
|
1225 |
-
self.__origin__ = origin
|
1226 |
-
|
1227 |
-
def __repr__(self):
|
1228 |
-
return f"{self.__origin__.__name__}.args"
|
1229 |
-
|
1230 |
-
def __eq__(self, other):
|
1231 |
-
if not isinstance(other, ParamSpecArgs):
|
1232 |
-
return NotImplemented
|
1233 |
-
return self.__origin__ == other.__origin__
|
1234 |
-
|
1235 |
-
class ParamSpecKwargs(_Immutable):
|
1236 |
-
"""The kwargs for a ParamSpec object.
|
1237 |
-
|
1238 |
-
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
|
1239 |
-
|
1240 |
-
ParamSpecKwargs objects have a reference back to their ParamSpec:
|
1241 |
-
|
1242 |
-
P.kwargs.__origin__ is P
|
1243 |
-
|
1244 |
-
This type is meant for runtime introspection and has no special meaning to
|
1245 |
-
static type checkers.
|
1246 |
-
"""
|
1247 |
-
def __init__(self, origin):
|
1248 |
-
self.__origin__ = origin
|
1249 |
-
|
1250 |
-
def __repr__(self):
|
1251 |
-
return f"{self.__origin__.__name__}.kwargs"
|
1252 |
-
|
1253 |
-
def __eq__(self, other):
|
1254 |
-
if not isinstance(other, ParamSpecKwargs):
|
1255 |
-
return NotImplemented
|
1256 |
-
return self.__origin__ == other.__origin__
|
1257 |
-
|
1258 |
-
# 3.10+
|
1259 |
-
if hasattr(typing, 'ParamSpec'):
|
1260 |
-
|
1261 |
-
# Add default Parameter - PEP 696
|
1262 |
-
class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True):
|
1263 |
-
"""Parameter specification variable."""
|
1264 |
-
|
1265 |
-
__module__ = 'typing'
|
1266 |
-
|
1267 |
-
def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
|
1268 |
-
default=_marker):
|
1269 |
-
super().__init__(name, bound=bound, covariant=covariant,
|
1270 |
-
contravariant=contravariant)
|
1271 |
-
_DefaultMixin.__init__(self, default)
|
1272 |
-
|
1273 |
-
# for pickling:
|
1274 |
-
try:
|
1275 |
-
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
|
1276 |
-
except (AttributeError, ValueError):
|
1277 |
-
def_mod = None
|
1278 |
-
if def_mod != 'typing_extensions':
|
1279 |
-
self.__module__ = def_mod
|
1280 |
-
|
1281 |
-
# 3.7-3.9
|
1282 |
-
else:
|
1283 |
-
|
1284 |
-
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
|
1285 |
-
class ParamSpec(list, _DefaultMixin):
|
1286 |
-
"""Parameter specification variable.
|
1287 |
-
|
1288 |
-
Usage::
|
1289 |
-
|
1290 |
-
P = ParamSpec('P')
|
1291 |
-
|
1292 |
-
Parameter specification variables exist primarily for the benefit of static
|
1293 |
-
type checkers. They are used to forward the parameter types of one
|
1294 |
-
callable to another callable, a pattern commonly found in higher order
|
1295 |
-
functions and decorators. They are only valid when used in ``Concatenate``,
|
1296 |
-
or as the first argument to ``Callable``. In Python 3.10 and higher,
|
1297 |
-
they are also supported in user-defined Generics at runtime.
|
1298 |
-
See class Generic for more information on generic types. An
|
1299 |
-
example for annotating a decorator::
|
1300 |
-
|
1301 |
-
T = TypeVar('T')
|
1302 |
-
P = ParamSpec('P')
|
1303 |
-
|
1304 |
-
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
|
1305 |
-
'''A type-safe decorator to add logging to a function.'''
|
1306 |
-
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
|
1307 |
-
logging.info(f'{f.__name__} was called')
|
1308 |
-
return f(*args, **kwargs)
|
1309 |
-
return inner
|
1310 |
-
|
1311 |
-
@add_logging
|
1312 |
-
def add_two(x: float, y: float) -> float:
|
1313 |
-
'''Add two numbers together.'''
|
1314 |
-
return x + y
|
1315 |
-
|
1316 |
-
Parameter specification variables defined with covariant=True or
|
1317 |
-
contravariant=True can be used to declare covariant or contravariant
|
1318 |
-
generic types. These keyword arguments are valid, but their actual semantics
|
1319 |
-
are yet to be decided. See PEP 612 for details.
|
1320 |
-
|
1321 |
-
Parameter specification variables can be introspected. e.g.:
|
1322 |
-
|
1323 |
-
P.__name__ == 'P'
|
1324 |
-
P.__bound__ == None
|
1325 |
-
P.__covariant__ == False
|
1326 |
-
P.__contravariant__ == False
|
1327 |
-
|
1328 |
-
Note that only parameter specification variables defined in global scope can
|
1329 |
-
be pickled.
|
1330 |
-
"""
|
1331 |
-
|
1332 |
-
# Trick Generic __parameters__.
|
1333 |
-
__class__ = typing.TypeVar
|
1334 |
-
|
1335 |
-
@property
|
1336 |
-
def args(self):
|
1337 |
-
return ParamSpecArgs(self)
|
1338 |
-
|
1339 |
-
@property
|
1340 |
-
def kwargs(self):
|
1341 |
-
return ParamSpecKwargs(self)
|
1342 |
-
|
1343 |
-
def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
|
1344 |
-
default=_marker):
|
1345 |
-
super().__init__([self])
|
1346 |
-
self.__name__ = name
|
1347 |
-
self.__covariant__ = bool(covariant)
|
1348 |
-
self.__contravariant__ = bool(contravariant)
|
1349 |
-
if bound:
|
1350 |
-
self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
|
1351 |
-
else:
|
1352 |
-
self.__bound__ = None
|
1353 |
-
_DefaultMixin.__init__(self, default)
|
1354 |
-
|
1355 |
-
# for pickling:
|
1356 |
-
try:
|
1357 |
-
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
|
1358 |
-
except (AttributeError, ValueError):
|
1359 |
-
def_mod = None
|
1360 |
-
if def_mod != 'typing_extensions':
|
1361 |
-
self.__module__ = def_mod
|
1362 |
-
|
1363 |
-
def __repr__(self):
|
1364 |
-
if self.__covariant__:
|
1365 |
-
prefix = '+'
|
1366 |
-
elif self.__contravariant__:
|
1367 |
-
prefix = '-'
|
1368 |
-
else:
|
1369 |
-
prefix = '~'
|
1370 |
-
return prefix + self.__name__
|
1371 |
-
|
1372 |
-
def __hash__(self):
|
1373 |
-
return object.__hash__(self)
|
1374 |
-
|
1375 |
-
def __eq__(self, other):
|
1376 |
-
return self is other
|
1377 |
-
|
1378 |
-
def __reduce__(self):
|
1379 |
-
return self.__name__
|
1380 |
-
|
1381 |
-
# Hack to get typing._type_check to pass.
|
1382 |
-
def __call__(self, *args, **kwargs):
|
1383 |
-
pass
|
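# Minimal usage sketch (illustrative helper, not part of this module),
# assuming the `ParamSpec` defined above: the runtime object mainly exposes
# its name and its .args/.kwargs handles for introspection; the typing
# semantics are for static checkers.
def _example_paramspec():
    P = ParamSpec("P")
    assert P.__name__ == "P"
    assert P.args.__origin__ is P
    assert P.kwargs.__origin__ is P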
1384 |
-
|
1385 |
-
|
1386 |
-
# 3.7-3.9
|
1387 |
-
if not hasattr(typing, 'Concatenate'):
|
1388 |
-
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
|
1389 |
-
class _ConcatenateGenericAlias(list):
|
1390 |
-
|
1391 |
-
# Trick Generic into looking into this for __parameters__.
|
1392 |
-
__class__ = typing._GenericAlias
|
1393 |
-
|
1394 |
-
# Flag in 3.8.
|
1395 |
-
_special = False
|
1396 |
-
|
1397 |
-
def __init__(self, origin, args):
|
1398 |
-
super().__init__(args)
|
1399 |
-
self.__origin__ = origin
|
1400 |
-
self.__args__ = args
|
1401 |
-
|
1402 |
-
def __repr__(self):
|
1403 |
-
_type_repr = typing._type_repr
|
1404 |
-
return (f'{_type_repr(self.__origin__)}'
|
1405 |
-
f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
|
1406 |
-
|
1407 |
-
def __hash__(self):
|
1408 |
-
return hash((self.__origin__, self.__args__))
|
1409 |
-
|
1410 |
-
# Hack to get typing._type_check to pass in Generic.
|
1411 |
-
def __call__(self, *args, **kwargs):
|
1412 |
-
pass
|
1413 |
-
|
1414 |
-
@property
|
1415 |
-
def __parameters__(self):
|
1416 |
-
return tuple(
|
1417 |
-
tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
|
1418 |
-
)
|
1419 |
-
|
1420 |
-
|
1421 |
-
# 3.7-3.9
|
1422 |
-
@typing._tp_cache
|
1423 |
-
def _concatenate_getitem(self, parameters):
|
1424 |
-
if parameters == ():
|
1425 |
-
raise TypeError("Cannot take a Concatenate of no types.")
|
1426 |
-
if not isinstance(parameters, tuple):
|
1427 |
-
parameters = (parameters,)
|
1428 |
-
if not isinstance(parameters[-1], ParamSpec):
|
1429 |
-
raise TypeError("The last parameter to Concatenate should be a "
|
1430 |
-
"ParamSpec variable.")
|
1431 |
-
msg = "Concatenate[arg, ...]: each arg must be a type."
|
1432 |
-
parameters = tuple(typing._type_check(p, msg) for p in parameters)
|
1433 |
-
return _ConcatenateGenericAlias(self, parameters)
|
1434 |
-
|
1435 |
-
|
1436 |
-
# 3.10+
|
1437 |
-
if hasattr(typing, 'Concatenate'):
|
1438 |
-
Concatenate = typing.Concatenate
|
1439 |
-
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
|
1440 |
-
# 3.9
|
1441 |
-
elif sys.version_info[:2] >= (3, 9):
|
1442 |
-
@_TypeAliasForm
|
1443 |
-
def Concatenate(self, parameters):
|
1444 |
-
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
|
1445 |
-
higher order function which adds, removes or transforms parameters of a
|
1446 |
-
callable.
|
1447 |
-
|
1448 |
-
For example::
|
1449 |
-
|
1450 |
-
Callable[Concatenate[int, P], int]
|
1451 |
-
|
1452 |
-
See PEP 612 for detailed information.
|
1453 |
-
"""
|
1454 |
-
return _concatenate_getitem(self, parameters)
|
1455 |
-
# 3.7-8
|
1456 |
-
else:
|
1457 |
-
class _ConcatenateForm(typing._SpecialForm, _root=True):
|
1458 |
-
def __repr__(self):
|
1459 |
-
return 'typing_extensions.' + self._name
|
1460 |
-
|
1461 |
-
def __getitem__(self, parameters):
|
1462 |
-
return _concatenate_getitem(self, parameters)
|
1463 |
-
|
1464 |
-
Concatenate = _ConcatenateForm(
|
1465 |
-
'Concatenate',
|
1466 |
-
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
|
1467 |
-
higher order function which adds, removes or transforms parameters of a
|
1468 |
-
callable.
|
1469 |
-
|
1470 |
-
For example::
|
1471 |
-
|
1472 |
-
Callable[Concatenate[int, P], int]
|
1473 |
-
|
1474 |
-
See PEP 612 for detailed information.
|
1475 |
-
""")
|
1476 |
-
|
1477 |
-
# 3.10+
|
1478 |
-
if hasattr(typing, 'TypeGuard'):
|
1479 |
-
TypeGuard = typing.TypeGuard
|
1480 |
-
# 3.9
|
1481 |
-
elif sys.version_info[:2] >= (3, 9):
|
1482 |
-
class _TypeGuardForm(typing._SpecialForm, _root=True):
|
1483 |
-
def __repr__(self):
|
1484 |
-
return 'typing_extensions.' + self._name
|
1485 |
-
|
1486 |
-
@_TypeGuardForm
|
1487 |
-
def TypeGuard(self, parameters):
|
1488 |
-
"""Special typing form used to annotate the return type of a user-defined
|
1489 |
-
type guard function. ``TypeGuard`` only accepts a single type argument.
|
1490 |
-
At runtime, functions marked this way should return a boolean.
|
1491 |
-
|
1492 |
-
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
|
1493 |
-
type checkers to determine a more precise type of an expression within a
|
1494 |
-
program's code flow. Usually type narrowing is done by analyzing
|
1495 |
-
conditional code flow and applying the narrowing to a block of code. The
|
1496 |
-
conditional expression here is sometimes referred to as a "type guard".
|
1497 |
-
|
1498 |
-
Sometimes it would be convenient to use a user-defined boolean function
|
1499 |
-
as a type guard. Such a function should use ``TypeGuard[...]`` as its
|
1500 |
-
return type to alert static type checkers to this intention.
|
1501 |
-
|
1502 |
-
Using ``-> TypeGuard`` tells the static type checker that for a given
|
1503 |
-
function:
|
1504 |
-
|
1505 |
-
1. The return value is a boolean.
|
1506 |
-
2. If the return value is ``True``, the type of its argument
|
1507 |
-
is the type inside ``TypeGuard``.
|
1508 |
-
|
1509 |
-
For example::
|
1510 |
-
|
1511 |
-
def is_str(val: Union[str, float]):
|
1512 |
-
# "isinstance" type guard
|
1513 |
-
if isinstance(val, str):
|
1514 |
-
# Type of ``val`` is narrowed to ``str``
|
1515 |
-
...
|
1516 |
-
else:
|
1517 |
-
# Else, type of ``val`` is narrowed to ``float``.
|
1518 |
-
...
|
1519 |
-
|
1520 |
-
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
|
1521 |
-
form of ``TypeA`` (it can even be a wider form) and this may lead to
|
1522 |
-
type-unsafe results. The main reason is to allow for things like
|
1523 |
-
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
|
1524 |
-
a subtype of the former, since ``List`` is invariant. The responsibility of
|
1525 |
-
writing type-safe type guards is left to the user.
|
1526 |
-
|
1527 |
-
``TypeGuard`` also works with type variables. For more information, see
|
1528 |
-
PEP 647 (User-Defined Type Guards).
|
1529 |
-
"""
|
1530 |
-
item = typing._type_check(parameters, f'{self} accepts only a single type.')
|
1531 |
-
return typing._GenericAlias(self, (item,))
|
1532 |
-
# 3.7-3.8
|
1533 |
-
else:
|
1534 |
-
class _TypeGuardForm(typing._SpecialForm, _root=True):
|
1535 |
-
|
1536 |
-
def __repr__(self):
|
1537 |
-
return 'typing_extensions.' + self._name
|
1538 |
-
|
1539 |
-
def __getitem__(self, parameters):
|
1540 |
-
item = typing._type_check(parameters,
|
1541 |
-
f'{self._name} accepts only a single type')
|
1542 |
-
return typing._GenericAlias(self, (item,))
|
1543 |
-
|
1544 |
-
TypeGuard = _TypeGuardForm(
|
1545 |
-
'TypeGuard',
|
1546 |
-
doc="""Special typing form used to annotate the return type of a user-defined
|
1547 |
-
type guard function. ``TypeGuard`` only accepts a single type argument.
|
1548 |
-
At runtime, functions marked this way should return a boolean.
|
1549 |
-
|
1550 |
-
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
|
1551 |
-
type checkers to determine a more precise type of an expression within a
|
1552 |
-
program's code flow. Usually type narrowing is done by analyzing
|
1553 |
-
conditional code flow and applying the narrowing to a block of code. The
|
1554 |
-
conditional expression here is sometimes referred to as a "type guard".
|
1555 |
-
|
1556 |
-
Sometimes it would be convenient to use a user-defined boolean function
|
1557 |
-
as a type guard. Such a function should use ``TypeGuard[...]`` as its
|
1558 |
-
return type to alert static type checkers to this intention.
|
1559 |
-
|
1560 |
-
Using ``-> TypeGuard`` tells the static type checker that for a given
|
1561 |
-
function:
|
1562 |
-
|
1563 |
-
1. The return value is a boolean.
|
1564 |
-
2. If the return value is ``True``, the type of its argument
|
1565 |
-
is the type inside ``TypeGuard``.
|
1566 |
-
|
1567 |
-
For example::
|
1568 |
-
|
1569 |
-
def is_str(val: Union[str, float]):
|
1570 |
-
# "isinstance" type guard
|
1571 |
-
if isinstance(val, str):
|
1572 |
-
# Type of ``val`` is narrowed to ``str``
|
1573 |
-
...
|
1574 |
-
else:
|
1575 |
-
# Else, type of ``val`` is narrowed to ``float``.
|
1576 |
-
...
|
1577 |
-
|
1578 |
-
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
|
1579 |
-
form of ``TypeA`` (it can even be a wider form) and this may lead to
|
1580 |
-
type-unsafe results. The main reason is to allow for things like
|
1581 |
-
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
|
1582 |
-
a subtype of the former, since ``List`` is invariant. The responsibility of
|
1583 |
-
writing type-safe type guards is left to the user.
|
1584 |
-
|
1585 |
-
``TypeGuard`` also works with type variables. For more information, see
|
1586 |
-
PEP 647 (User-Defined Type Guards).
|
1587 |
-
""")
|
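# Minimal usage sketch (illustrative helper, not part of this module),
# assuming the `TypeGuard` defined above: the predicate below narrows
# List[object] to List[str] for static checkers; at runtime it is just a
# boolean function.
def _example_typeguard():
    def is_str_list(values: typing.List[object]) -> TypeGuard[typing.List[str]]:
        return all(isinstance(v, str) for v in values)

    assert is_str_list(["a", "b"])
    assert not is_str_list(["a", 1])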
1588 |
-
|
1589 |
-
|
1590 |
-
# Vendored from CPython typing._SpecialForm
|
1591 |
-
class _SpecialForm(typing._Final, _root=True):
|
1592 |
-
__slots__ = ('_name', '__doc__', '_getitem')
|
1593 |
-
|
1594 |
-
def __init__(self, getitem):
|
1595 |
-
self._getitem = getitem
|
1596 |
-
self._name = getitem.__name__
|
1597 |
-
self.__doc__ = getitem.__doc__
|
1598 |
-
|
1599 |
-
def __getattr__(self, item):
|
1600 |
-
if item in {'__name__', '__qualname__'}:
|
1601 |
-
return self._name
|
1602 |
-
|
1603 |
-
raise AttributeError(item)
|
1604 |
-
|
1605 |
-
def __mro_entries__(self, bases):
|
1606 |
-
raise TypeError(f"Cannot subclass {self!r}")
|
1607 |
-
|
1608 |
-
def __repr__(self):
|
1609 |
-
return f'typing_extensions.{self._name}'
|
1610 |
-
|
1611 |
-
def __reduce__(self):
|
1612 |
-
return self._name
|
1613 |
-
|
1614 |
-
def __call__(self, *args, **kwds):
|
1615 |
-
raise TypeError(f"Cannot instantiate {self!r}")
|
1616 |
-
|
1617 |
-
def __or__(self, other):
|
1618 |
-
return typing.Union[self, other]
|
1619 |
-
|
1620 |
-
def __ror__(self, other):
|
1621 |
-
return typing.Union[other, self]
|
1622 |
-
|
1623 |
-
def __instancecheck__(self, obj):
|
1624 |
-
raise TypeError(f"{self} cannot be used with isinstance()")
|
1625 |
-
|
1626 |
-
def __subclasscheck__(self, cls):
|
1627 |
-
raise TypeError(f"{self} cannot be used with issubclass()")
|
1628 |
-
|
1629 |
-
@typing._tp_cache
|
1630 |
-
def __getitem__(self, parameters):
|
1631 |
-
return self._getitem(self, parameters)
|
1632 |
-
|
1633 |
-
|
1634 |
-
if hasattr(typing, "LiteralString"):
|
1635 |
-
LiteralString = typing.LiteralString
|
1636 |
-
else:
|
1637 |
-
@_SpecialForm
|
1638 |
-
def LiteralString(self, params):
|
1639 |
-
"""Represents an arbitrary literal string.
|
1640 |
-
|
1641 |
-
Example::
|
1642 |
-
|
1643 |
-
from pip._vendor.typing_extensions import LiteralString
|
1644 |
-
|
1645 |
-
def query(sql: LiteralString) -> ...:
|
1646 |
-
...
|
1647 |
-
|
1648 |
-
query("SELECT * FROM table") # ok
|
1649 |
-
query(f"SELECT * FROM {input()}") # not ok
|
1650 |
-
|
1651 |
-
See PEP 675 for details.
|
1652 |
-
|
1653 |
-
"""
|
1654 |
-
raise TypeError(f"{self} is not subscriptable")
|
1655 |
-
|
1656 |
-
|
1657 |
-
if hasattr(typing, "Self"):
|
1658 |
-
Self = typing.Self
|
1659 |
-
else:
|
1660 |
-
@_SpecialForm
|
1661 |
-
def Self(self, params):
|
1662 |
-
"""Used to spell the type of "self" in classes.
|
1663 |
-
|
1664 |
-
Example::
|
1665 |
-
|
1666 |
-
from typing import Self
|
1667 |
-
|
1668 |
-
class ReturnsSelf:
|
1669 |
-
def parse(self, data: bytes) -> Self:
|
1670 |
-
...
|
1671 |
-
return self
|
1672 |
-
|
1673 |
-
"""
|
1674 |
-
|
1675 |
-
raise TypeError(f"{self} is not subscriptable")
|
1676 |
-
|
1677 |
-
|
1678 |
-
if hasattr(typing, "Never"):
|
1679 |
-
Never = typing.Never
|
1680 |
-
else:
|
1681 |
-
@_SpecialForm
|
1682 |
-
def Never(self, params):
|
1683 |
-
"""The bottom type, a type that has no members.
|
1684 |
-
|
1685 |
-
This can be used to define a function that should never be
|
1686 |
-
called, or a function that never returns::
|
1687 |
-
|
1688 |
-
from pip._vendor.typing_extensions import Never
|
1689 |
-
|
1690 |
-
def never_call_me(arg: Never) -> None:
|
1691 |
-
pass
|
1692 |
-
|
1693 |
-
def int_or_str(arg: int | str) -> None:
|
1694 |
-
never_call_me(arg) # type checker error
|
1695 |
-
match arg:
|
1696 |
-
case int():
|
1697 |
-
print("It's an int")
|
1698 |
-
case str():
|
1699 |
-
print("It's a str")
|
1700 |
-
case _:
|
1701 |
-
never_call_me(arg) # ok, arg is of type Never
|
1702 |
-
|
1703 |
-
"""
|
1704 |
-
|
1705 |
-
raise TypeError(f"{self} is not subscriptable")
|
1706 |
-
|
1707 |
-
|
1708 |
-
if hasattr(typing, 'Required'):
|
1709 |
-
Required = typing.Required
|
1710 |
-
NotRequired = typing.NotRequired
|
1711 |
-
elif sys.version_info[:2] >= (3, 9):
|
1712 |
-
class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
|
1713 |
-
def __repr__(self):
|
1714 |
-
return 'typing_extensions.' + self._name
|
1715 |
-
|
1716 |
-
@_ExtensionsSpecialForm
|
1717 |
-
def Required(self, parameters):
|
1718 |
-
"""A special typing construct to mark a key of a total=False TypedDict
|
1719 |
-
as required. For example:
|
1720 |
-
|
1721 |
-
class Movie(TypedDict, total=False):
|
1722 |
-
title: Required[str]
|
1723 |
-
year: int
|
1724 |
-
|
1725 |
-
m = Movie(
|
1726 |
-
title='The Matrix', # typechecker error if key is omitted
|
1727 |
-
year=1999,
|
1728 |
-
)
|
1729 |
-
|
1730 |
-
There is no runtime checking that a required key is actually provided
|
1731 |
-
when instantiating a related TypedDict.
|
1732 |
-
"""
|
1733 |
-
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
|
1734 |
-
return typing._GenericAlias(self, (item,))
|
1735 |
-
|
1736 |
-
@_ExtensionsSpecialForm
|
1737 |
-
def NotRequired(self, parameters):
|
1738 |
-
"""A special typing construct to mark a key of a TypedDict as
|
1739 |
-
potentially missing. For example:
|
1740 |
-
|
1741 |
-
class Movie(TypedDict):
|
1742 |
-
title: str
|
1743 |
-
year: NotRequired[int]
|
1744 |
-
|
1745 |
-
m = Movie(
|
1746 |
-
title='The Matrix', # typechecker error if key is omitted
|
1747 |
-
year=1999,
|
1748 |
-
)
|
1749 |
-
"""
|
1750 |
-
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
|
1751 |
-
return typing._GenericAlias(self, (item,))
|
1752 |
-
|
1753 |
-
else:
|
1754 |
-
class _RequiredForm(typing._SpecialForm, _root=True):
|
1755 |
-
def __repr__(self):
|
1756 |
-
return 'typing_extensions.' + self._name
|
1757 |
-
|
1758 |
-
def __getitem__(self, parameters):
|
1759 |
-
item = typing._type_check(parameters,
|
1760 |
-
f'{self._name} accepts only a single type.')
|
1761 |
-
return typing._GenericAlias(self, (item,))
|
1762 |
-
|
1763 |
-
Required = _RequiredForm(
|
1764 |
-
'Required',
|
1765 |
-
doc="""A special typing construct to mark a key of a total=False TypedDict
|
1766 |
-
as required. For example:
|
1767 |
-
|
1768 |
-
class Movie(TypedDict, total=False):
|
1769 |
-
title: Required[str]
|
1770 |
-
year: int
|
1771 |
-
|
1772 |
-
m = Movie(
|
1773 |
-
title='The Matrix', # typechecker error if key is omitted
|
1774 |
-
year=1999,
|
1775 |
-
)
|
1776 |
-
|
1777 |
-
There is no runtime checking that a required key is actually provided
|
1778 |
-
when instantiating a related TypedDict.
|
1779 |
-
""")
|
1780 |
-
NotRequired = _RequiredForm(
|
1781 |
-
'NotRequired',
|
1782 |
-
doc="""A special typing construct to mark a key of a TypedDict as
|
1783 |
-
potentially missing. For example:
|
1784 |
-
|
1785 |
-
class Movie(TypedDict):
|
1786 |
-
title: str
|
1787 |
-
year: NotRequired[int]
|
1788 |
-
|
1789 |
-
m = Movie(
|
1790 |
-
title='The Matrix', # typechecker error if key is omitted
|
1791 |
-
year=1999,
|
1792 |
-
)
|
1793 |
-
""")
|
1794 |
-
|
1795 |
-
|
1796 |
-
if hasattr(typing, "Unpack"): # 3.11+
|
1797 |
-
Unpack = typing.Unpack
|
1798 |
-
elif sys.version_info[:2] >= (3, 9):
|
1799 |
-
class _UnpackSpecialForm(typing._SpecialForm, _root=True):
|
1800 |
-
def __repr__(self):
|
1801 |
-
return 'typing_extensions.' + self._name
|
1802 |
-
|
1803 |
-
class _UnpackAlias(typing._GenericAlias, _root=True):
|
1804 |
-
__class__ = typing.TypeVar
|
1805 |
-
|
1806 |
-
@_UnpackSpecialForm
|
1807 |
-
def Unpack(self, parameters):
|
1808 |
-
"""A special typing construct to unpack a variadic type. For example:
|
1809 |
-
|
1810 |
-
Shape = TypeVarTuple('Shape')
|
1811 |
-
Batch = NewType('Batch', int)
|
1812 |
-
|
1813 |
-
def add_batch_axis(
|
1814 |
-
x: Array[Unpack[Shape]]
|
1815 |
-
) -> Array[Batch, Unpack[Shape]]: ...
|
1816 |
-
|
1817 |
-
"""
|
1818 |
-
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
|
1819 |
-
return _UnpackAlias(self, (item,))
|
1820 |
-
|
1821 |
-
def _is_unpack(obj):
|
1822 |
-
return isinstance(obj, _UnpackAlias)
|
1823 |
-
|
1824 |
-
else:
|
1825 |
-
class _UnpackAlias(typing._GenericAlias, _root=True):
|
1826 |
-
__class__ = typing.TypeVar
|
1827 |
-
|
1828 |
-
class _UnpackForm(typing._SpecialForm, _root=True):
|
1829 |
-
def __repr__(self):
|
1830 |
-
return 'typing_extensions.' + self._name
|
1831 |
-
|
1832 |
-
def __getitem__(self, parameters):
|
1833 |
-
item = typing._type_check(parameters,
|
1834 |
-
f'{self._name} accepts only a single type.')
|
1835 |
-
return _UnpackAlias(self, (item,))
|
1836 |
-
|
1837 |
-
Unpack = _UnpackForm(
|
1838 |
-
'Unpack',
|
1839 |
-
doc="""A special typing construct to unpack a variadic type. For example:
|
1840 |
-
|
1841 |
-
Shape = TypeVarTuple('Shape')
|
1842 |
-
Batch = NewType('Batch', int)
|
1843 |
-
|
1844 |
-
def add_batch_axis(
|
1845 |
-
x: Array[Unpack[Shape]]
|
1846 |
-
) -> Array[Batch, Unpack[Shape]]: ...
|
1847 |
-
|
1848 |
-
""")
|
1849 |
-
|
1850 |
-
def _is_unpack(obj):
|
1851 |
-
return isinstance(obj, _UnpackAlias)
|
1852 |
-
|
1853 |
-
|
1854 |
-
if hasattr(typing, "TypeVarTuple"): # 3.11+
|
1855 |
-
|
1856 |
-
# Add default Parameter - PEP 696
|
1857 |
-
class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True):
|
1858 |
-
"""Type variable tuple."""
|
1859 |
-
|
1860 |
-
def __init__(self, name, *, default=_marker):
|
1861 |
-
super().__init__(name)
|
1862 |
-
_DefaultMixin.__init__(self, default)
|
1863 |
-
|
1864 |
-
# for pickling:
|
1865 |
-
try:
|
1866 |
-
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
|
1867 |
-
except (AttributeError, ValueError):
|
1868 |
-
def_mod = None
|
1869 |
-
if def_mod != 'typing_extensions':
|
1870 |
-
self.__module__ = def_mod
|
1871 |
-
|
1872 |
-
else:
|
1873 |
-
class TypeVarTuple(_DefaultMixin):
|
1874 |
-
"""Type variable tuple.
|
1875 |
-
|
1876 |
-
Usage::
|
1877 |
-
|
1878 |
-
Ts = TypeVarTuple('Ts')
|
1879 |
-
|
1880 |
-
In the same way that a normal type variable is a stand-in for a single
|
1881 |
-
type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
|
1882 |
-
type such as ``Tuple[int, str]``.
|
1883 |
-
|
1884 |
-
Type variable tuples can be used in ``Generic`` declarations.
|
1885 |
-
Consider the following example::
|
1886 |
-
|
1887 |
-
class Array(Generic[*Ts]): ...
|
1888 |
-
|
1889 |
-
The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
|
1890 |
-
where ``T1`` and ``T2`` are type variables. To use these type variables
|
1891 |
-
as type parameters of ``Array``, we must *unpack* the type variable tuple using
|
1892 |
-
the star operator: ``*Ts``. The signature of ``Array`` then behaves
|
1893 |
-
as if we had simply written ``class Array(Generic[T1, T2]): ...``.
|
1894 |
-
In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows
|
1895 |
-
us to parameterise the class with an *arbitrary* number of type parameters.
|
1896 |
-
|
1897 |
-
Type variable tuples can be used anywhere a normal ``TypeVar`` can.
|
1898 |
-
This includes class definitions, as shown above, as well as function
|
1899 |
-
signatures and variable annotations::
|
1900 |
-
|
1901 |
-
class Array(Generic[*Ts]):
|
1902 |
-
|
1903 |
-
def __init__(self, shape: Tuple[*Ts]):
|
1904 |
-
self._shape: Tuple[*Ts] = shape
|
1905 |
-
|
1906 |
-
def get_shape(self) -> Tuple[*Ts]:
|
1907 |
-
return self._shape
|
1908 |
-
|
1909 |
-
shape = (Height(480), Width(640))
|
1910 |
-
x: Array[Height, Width] = Array(shape)
|
1911 |
-
y = abs(x) # Inferred type is Array[Height, Width]
|
1912 |
-
z = x + x # ... is Array[Height, Width]
|
1913 |
-
x.get_shape() # ... is tuple[Height, Width]
|
1914 |
-
|
1915 |
-
"""
|
1916 |
-
|
1917 |
-
# Trick Generic __parameters__.
|
1918 |
-
__class__ = typing.TypeVar
|
1919 |
-
|
1920 |
-
def __iter__(self):
|
1921 |
-
yield self.__unpacked__
|
1922 |
-
|
1923 |
-
def __init__(self, name, *, default=_marker):
|
1924 |
-
self.__name__ = name
|
1925 |
-
_DefaultMixin.__init__(self, default)
|
1926 |
-
|
1927 |
-
# for pickling:
|
1928 |
-
try:
|
1929 |
-
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
|
1930 |
-
except (AttributeError, ValueError):
|
1931 |
-
def_mod = None
|
1932 |
-
if def_mod != 'typing_extensions':
|
1933 |
-
self.__module__ = def_mod
|
1934 |
-
|
1935 |
-
self.__unpacked__ = Unpack[self]
|
1936 |
-
|
1937 |
-
def __repr__(self):
|
1938 |
-
return self.__name__
|
1939 |
-
|
1940 |
-
def __hash__(self):
|
1941 |
-
return object.__hash__(self)
|
1942 |
-
|
1943 |
-
def __eq__(self, other):
|
1944 |
-
return self is other
|
1945 |
-
|
1946 |
-
def __reduce__(self):
|
1947 |
-
return self.__name__
|
1948 |
-
|
1949 |
-
def __init_subclass__(self, *args, **kwds):
|
1950 |
-
if '_root' not in kwds:
|
1951 |
-
raise TypeError("Cannot subclass special typing classes")
|
1952 |
-
|
1953 |
-
|
1954 |
-
if hasattr(typing, "reveal_type"):
|
1955 |
-
reveal_type = typing.reveal_type
|
1956 |
-
else:
|
1957 |
-
def reveal_type(__obj: T) -> T:
|
1958 |
-
"""Reveal the inferred type of a variable.
|
1959 |
-
|
1960 |
-
When a static type checker encounters a call to ``reveal_type()``,
|
1961 |
-
it will emit the inferred type of the argument::
|
1962 |
-
|
1963 |
-
x: int = 1
|
1964 |
-
reveal_type(x)
|
1965 |
-
|
1966 |
-
Running a static type checker (e.g., ``mypy``) on this example
|
1967 |
-
will produce output similar to 'Revealed type is "builtins.int"'.
|
1968 |
-
|
1969 |
-
At runtime, the function prints the runtime type of the
|
1970 |
-
argument and returns it unchanged.
|
1971 |
-
|
1972 |
-
"""
|
1973 |
-
print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr)
|
1974 |
-
return __obj
|
1975 |
-
|
1976 |
-
|
1977 |
-
if hasattr(typing, "assert_never"):
|
1978 |
-
assert_never = typing.assert_never
|
1979 |
-
else:
|
1980 |
-
def assert_never(__arg: Never) -> Never:
|
1981 |
-
"""Assert to the type checker that a line of code is unreachable.
|
1982 |
-
|
1983 |
-
Example::
|
1984 |
-
|
1985 |
-
def int_or_str(arg: int | str) -> None:
|
1986 |
-
match arg:
|
1987 |
-
case int():
|
1988 |
-
print("It's an int")
|
1989 |
-
case str():
|
1990 |
-
print("It's a str")
|
1991 |
-
case _:
|
1992 |
-
assert_never(arg)
|
1993 |
-
|
1994 |
-
If a type checker finds that a call to assert_never() is
|
1995 |
-
reachable, it will emit an error.
|
1996 |
-
|
1997 |
-
At runtime, this throws an exception when called.
|
1998 |
-
|
1999 |
-
"""
|
2000 |
-
raise AssertionError("Expected code to be unreachable")
|
2001 |
-
|
2002 |
-
|
2003 |
-
if sys.version_info >= (3, 12):
|
2004 |
-
# dataclass_transform exists in 3.11 but lacks the frozen_default parameter
|
2005 |
-
dataclass_transform = typing.dataclass_transform
|
2006 |
-
else:
|
2007 |
-
def dataclass_transform(
|
2008 |
-
*,
|
2009 |
-
eq_default: bool = True,
|
2010 |
-
order_default: bool = False,
|
2011 |
-
kw_only_default: bool = False,
|
2012 |
-
frozen_default: bool = False,
|
2013 |
-
field_specifiers: typing.Tuple[
|
2014 |
-
typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
|
2015 |
-
...
|
2016 |
-
] = (),
|
2017 |
-
**kwargs: typing.Any,
|
2018 |
-
) -> typing.Callable[[T], T]:
|
2019 |
-
"""Decorator that marks a function, class, or metaclass as providing
|
2020 |
-
dataclass-like behavior.
|
2021 |
-
|
2022 |
-
Example:
|
2023 |
-
|
2024 |
-
from pip._vendor.typing_extensions import dataclass_transform
|
2025 |
-
|
2026 |
-
_T = TypeVar("_T")
|
2027 |
-
|
2028 |
-
# Used on a decorator function
|
2029 |
-
@dataclass_transform()
|
2030 |
-
def create_model(cls: type[_T]) -> type[_T]:
|
2031 |
-
...
|
2032 |
-
return cls
|
2033 |
-
|
2034 |
-
@create_model
|
2035 |
-
class CustomerModel:
|
2036 |
-
id: int
|
2037 |
-
name: str
|
2038 |
-
|
2039 |
-
# Used on a base class
|
2040 |
-
@dataclass_transform()
|
2041 |
-
class ModelBase: ...
|
2042 |
-
|
2043 |
-
class CustomerModel(ModelBase):
|
2044 |
-
id: int
|
2045 |
-
name: str
|
2046 |
-
|
2047 |
-
# Used on a metaclass
|
2048 |
-
@dataclass_transform()
|
2049 |
-
class ModelMeta(type): ...
|
2050 |
-
|
2051 |
-
class ModelBase(metaclass=ModelMeta): ...
|
2052 |
-
|
2053 |
-
class CustomerModel(ModelBase):
|
2054 |
-
id: int
|
2055 |
-
name: str
|
2056 |
-
|
2057 |
-
Each of the ``CustomerModel`` classes defined in this example will now
|
2058 |
-
behave similarly to a dataclass created with the ``@dataclasses.dataclass``
|
2059 |
-
decorator. For example, the type checker will synthesize an ``__init__``
|
2060 |
-
method.
|
2061 |
-
|
2062 |
-
The arguments to this decorator can be used to customize this behavior:
|
2063 |
-
- ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
|
2064 |
-
True or False if it is omitted by the caller.
|
2065 |
-
- ``order_default`` indicates whether the ``order`` parameter is
|
2066 |
-
assumed to be True or False if it is omitted by the caller.
|
2067 |
-
- ``kw_only_default`` indicates whether the ``kw_only`` parameter is
|
2068 |
-
assumed to be True or False if it is omitted by the caller.
|
2069 |
-
- ``frozen_default`` indicates whether the ``frozen`` parameter is
|
2070 |
-
assumed to be True or False if it is omitted by the caller.
|
2071 |
-
- ``field_specifiers`` specifies a static list of supported classes
|
2072 |
-
or functions that describe fields, similar to ``dataclasses.field()``.
|
2073 |
-
|
2074 |
-
At runtime, this decorator records its arguments in the
|
2075 |
-
``__dataclass_transform__`` attribute on the decorated object.
|
2076 |
-
|
2077 |
-
See PEP 681 for details.
|
2078 |
-
|
2079 |
-
"""
|
2080 |
-
def decorator(cls_or_fn):
|
2081 |
-
cls_or_fn.__dataclass_transform__ = {
|
2082 |
-
"eq_default": eq_default,
|
2083 |
-
"order_default": order_default,
|
2084 |
-
"kw_only_default": kw_only_default,
|
2085 |
-
"frozen_default": frozen_default,
|
2086 |
-
"field_specifiers": field_specifiers,
|
2087 |
-
"kwargs": kwargs,
|
2088 |
-
}
|
2089 |
-
return cls_or_fn
|
2090 |
-
return decorator
|
2091 |
-
|
2092 |
-
|
2093 |
-
if hasattr(typing, "override"):
|
2094 |
-
override = typing.override
|
2095 |
-
else:
|
2096 |
-
_F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
|
2097 |
-
|
2098 |
-
def override(__arg: _F) -> _F:
|
2099 |
-
"""Indicate that a method is intended to override a method in a base class.
|
2100 |
-
|
2101 |
-
Usage:
|
2102 |
-
|
2103 |
-
class Base:
|
2104 |
-
def method(self) -> None: ...
|
2105 |
-
pass
|
2106 |
-
|
2107 |
-
class Child(Base):
|
2108 |
-
@override
|
2109 |
-
def method(self) -> None:
|
2110 |
-
super().method()
|
2111 |
-
|
2112 |
-
When this decorator is applied to a method, the type checker will
|
2113 |
-
validate that it overrides a method with the same name on a base class.
|
2114 |
-
This helps prevent bugs that may occur when a base class is changed
|
2115 |
-
without an equivalent change to a child class.
|
2116 |
-
|
2117 |
-
There is no runtime checking of these properties. The decorator
|
2118 |
-
sets the ``__override__`` attribute to ``True`` on the decorated object
|
2119 |
-
to allow runtime introspection.
|
2120 |
-
|
2121 |
-
See PEP 698 for details.
|
2122 |
-
|
2123 |
-
"""
|
2124 |
-
try:
|
2125 |
-
__arg.__override__ = True
|
2126 |
-
except (AttributeError, TypeError):
|
2127 |
-
# Skip the attribute silently if it is not writable.
|
2128 |
-
# AttributeError happens if the object has __slots__ or a
|
2129 |
-
# read-only property, TypeError if it's a builtin class.
|
2130 |
-
pass
|
2131 |
-
return __arg
|
2132 |
-
|
2133 |
-
|
2134 |
-
if hasattr(typing, "deprecated"):
|
2135 |
-
deprecated = typing.deprecated
|
2136 |
-
else:
|
2137 |
-
_T = typing.TypeVar("_T")
|
2138 |
-
|
2139 |
-
def deprecated(
|
2140 |
-
__msg: str,
|
2141 |
-
*,
|
2142 |
-
category: typing.Optional[typing.Type[Warning]] = DeprecationWarning,
|
2143 |
-
stacklevel: int = 1,
|
2144 |
-
) -> typing.Callable[[_T], _T]:
|
2145 |
-
"""Indicate that a class, function or overload is deprecated.
|
2146 |
-
|
2147 |
-
Usage:
|
2148 |
-
|
2149 |
-
@deprecated("Use B instead")
|
2150 |
-
class A:
|
2151 |
-
pass
|
2152 |
-
|
2153 |
-
@deprecated("Use g instead")
|
2154 |
-
def f():
|
2155 |
-
pass
|
2156 |
-
|
2157 |
-
@overload
|
2158 |
-
@deprecated("int support is deprecated")
|
2159 |
-
def g(x: int) -> int: ...
|
2160 |
-
@overload
|
2161 |
-
def g(x: str) -> int: ...
|
2162 |
-
|
2163 |
-
When this decorator is applied to an object, the type checker
|
2164 |
-
will generate a diagnostic on usage of the deprecated object.
|
2165 |
-
|
2166 |
-
No runtime warning is issued. The decorator sets the ``__deprecated__``
|
2167 |
-
attribute on the decorated object to the deprecation message
|
2168 |
-
passed to the decorator. If applied to an overload, the decorator
|
2169 |
-
must be after the ``@overload`` decorator for the attribute to
|
2170 |
-
exist on the overload as returned by ``get_overloads()``.
|
2171 |
-
|
2172 |
-
See PEP 702 for details.
|
2173 |
-
|
2174 |
-
"""
|
2175 |
-
def decorator(__arg: _T) -> _T:
|
2176 |
-
if category is None:
|
2177 |
-
__arg.__deprecated__ = __msg
|
2178 |
-
return __arg
|
2179 |
-
elif isinstance(__arg, type):
|
2180 |
-
original_new = __arg.__new__
|
2181 |
-
has_init = __arg.__init__ is not object.__init__
|
2182 |
-
|
2183 |
-
@functools.wraps(original_new)
|
2184 |
-
def __new__(cls, *args, **kwargs):
|
2185 |
-
warnings.warn(__msg, category=category, stacklevel=stacklevel + 1)
|
2186 |
-
# Mirrors a similar check in object.__new__.
|
2187 |
-
if not has_init and (args or kwargs):
|
2188 |
-
raise TypeError(f"{cls.__name__}() takes no arguments")
|
2189 |
-
if original_new is not object.__new__:
|
2190 |
-
return original_new(cls, *args, **kwargs)
|
2191 |
-
else:
|
2192 |
-
return original_new(cls)
|
2193 |
-
|
2194 |
-
__arg.__new__ = staticmethod(__new__)
|
2195 |
-
__arg.__deprecated__ = __new__.__deprecated__ = __msg
|
2196 |
-
return __arg
|
2197 |
-
elif callable(__arg):
|
2198 |
-
@functools.wraps(__arg)
|
2199 |
-
def wrapper(*args, **kwargs):
|
2200 |
-
warnings.warn(__msg, category=category, stacklevel=stacklevel + 1)
|
2201 |
-
return __arg(*args, **kwargs)
|
2202 |
-
|
2203 |
-
__arg.__deprecated__ = wrapper.__deprecated__ = __msg
|
2204 |
-
return wrapper
|
2205 |
-
else:
|
2206 |
-
raise TypeError(
|
2207 |
-
"@deprecated decorator with non-None category must be applied to "
|
2208 |
-
f"a class or callable, not {__arg!r}"
|
2209 |
-
)
|
2210 |
-
|
2211 |
-
return decorator
|
2212 |
-
|
2213 |
-
|
2214 |
-
# We have to do some monkey patching to deal with the dual nature of
|
2215 |
-
# Unpack/TypeVarTuple:
|
2216 |
-
# - We want Unpack to be a kind of TypeVar so it gets accepted in
|
2217 |
-
# Generic[Unpack[Ts]]
|
2218 |
-
# - We want it to *not* be treated as a TypeVar for the purposes of
|
2219 |
-
# counting generic parameters, so that when we subscript a generic,
|
2220 |
-
# the runtime doesn't try to substitute the Unpack with the subscripted type.
|
2221 |
-
if not hasattr(typing, "TypeVarTuple"):
|
2222 |
-
typing._collect_type_vars = _collect_type_vars
|
2223 |
-
typing._check_generic = _check_generic
|
2224 |
-
|
2225 |
-
|
2226 |
-
# Backport typing.NamedTuple as it exists in Python 3.11.
|
2227 |
-
# In 3.11, the ability to define generic `NamedTuple`s was supported.
|
2228 |
-
# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
|
2229 |
-
if sys.version_info >= (3, 11):
|
2230 |
-
NamedTuple = typing.NamedTuple
|
2231 |
-
else:
|
2232 |
-
def _caller():
|
2233 |
-
try:
|
2234 |
-
return sys._getframe(2).f_globals.get('__name__', '__main__')
|
2235 |
-
except (AttributeError, ValueError): # For platforms without _getframe()
|
2236 |
-
return None
|
2237 |
-
|
2238 |
-
def _make_nmtuple(name, types, module, defaults=()):
|
2239 |
-
fields = [n for n, t in types]
|
2240 |
-
annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
|
2241 |
-
for n, t in types}
|
2242 |
-
nm_tpl = collections.namedtuple(name, fields,
|
2243 |
-
defaults=defaults, module=module)
|
2244 |
-
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
|
2245 |
-
# The `_field_types` attribute was removed in 3.9;
|
2246 |
-
# in earlier versions, it is the same as the `__annotations__` attribute
|
2247 |
-
if sys.version_info < (3, 9):
|
2248 |
-
nm_tpl._field_types = annotations
|
2249 |
-
return nm_tpl
|
2250 |
-
|
2251 |
-
_prohibited_namedtuple_fields = typing._prohibited
|
2252 |
-
_special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
|
2253 |
-
|
2254 |
-
class _NamedTupleMeta(type):
|
2255 |
-
def __new__(cls, typename, bases, ns):
|
2256 |
-
assert _NamedTuple in bases
|
2257 |
-
for base in bases:
|
2258 |
-
if base is not _NamedTuple and base is not typing.Generic:
|
2259 |
-
raise TypeError(
|
2260 |
-
'can only inherit from a NamedTuple type and Generic')
|
2261 |
-
bases = tuple(tuple if base is _NamedTuple else base for base in bases)
|
2262 |
-
types = ns.get('__annotations__', {})
|
2263 |
-
default_names = []
|
2264 |
-
for field_name in types:
|
2265 |
-
if field_name in ns:
|
2266 |
-
default_names.append(field_name)
|
2267 |
-
elif default_names:
|
2268 |
-
raise TypeError(f"Non-default namedtuple field {field_name} "
|
2269 |
-
f"cannot follow default field"
|
2270 |
-
f"{'s' if len(default_names) > 1 else ''} "
|
2271 |
-
f"{', '.join(default_names)}")
|
2272 |
-
nm_tpl = _make_nmtuple(
|
2273 |
-
typename, types.items(),
|
2274 |
-
defaults=[ns[n] for n in default_names],
|
2275 |
-
module=ns['__module__']
|
2276 |
-
)
|
2277 |
-
nm_tpl.__bases__ = bases
|
2278 |
-
if typing.Generic in bases:
|
2279 |
-
class_getitem = typing.Generic.__class_getitem__.__func__
|
2280 |
-
nm_tpl.__class_getitem__ = classmethod(class_getitem)
|
2281 |
-
# update from user namespace without overriding special namedtuple attributes
|
2282 |
-
for key in ns:
|
2283 |
-
if key in _prohibited_namedtuple_fields:
|
2284 |
-
raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
|
2285 |
-
elif key not in _special_namedtuple_fields and key not in nm_tpl._fields:
|
2286 |
-
setattr(nm_tpl, key, ns[key])
|
2287 |
-
if typing.Generic in bases:
|
2288 |
-
nm_tpl.__init_subclass__()
|
2289 |
-
return nm_tpl
|
2290 |
-
|
2291 |
-
def NamedTuple(__typename, __fields=None, **kwargs):
|
2292 |
-
if __fields is None:
|
2293 |
-
__fields = kwargs.items()
|
2294 |
-
elif kwargs:
|
2295 |
-
raise TypeError("Either list of fields or keywords"
|
2296 |
-
" can be provided to NamedTuple, not both")
|
2297 |
-
return _make_nmtuple(__typename, __fields, module=_caller())
|
2298 |
-
|
2299 |
-
NamedTuple.__doc__ = typing.NamedTuple.__doc__
|
2300 |
-
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
|
2301 |
-
|
2302 |
-
# On 3.8+, alter the signature so that it matches typing.NamedTuple.
|
2303 |
-
# The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
|
2304 |
-
# so just leave the signature as it is on 3.7.
|
2305 |
-
if sys.version_info >= (3, 8):
|
2306 |
-
NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
|
2307 |
-
|
2308 |
-
def _namedtuple_mro_entries(bases):
|
2309 |
-
assert NamedTuple in bases
|
2310 |
-
return (_NamedTuple,)
|
2311 |
-
|
2312 |
-
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/results.py
DELETED
@@ -1,760 +0,0 @@
|
|
1 |
-
# results.py
|
2 |
-
from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator
|
3 |
-
import pprint
|
4 |
-
from weakref import ref as wkref
|
5 |
-
from typing import Tuple, Any
|
6 |
-
|
7 |
-
str_type: Tuple[type, ...] = (str, bytes)
|
8 |
-
_generator_type = type((_ for _ in ()))
|
9 |
-
|
10 |
-
|
11 |
-
class _ParseResultsWithOffset:
|
12 |
-
__slots__ = ["tup"]
|
13 |
-
|
14 |
-
def __init__(self, p1, p2):
|
15 |
-
self.tup = (p1, p2)
|
16 |
-
|
17 |
-
def __getitem__(self, i):
|
18 |
-
return self.tup[i]
|
19 |
-
|
20 |
-
def __getstate__(self):
|
21 |
-
return self.tup
|
22 |
-
|
23 |
-
def __setstate__(self, *args):
|
24 |
-
self.tup = args[0]
|
25 |
-
|
26 |
-
|
27 |
-
class ParseResults:
|
28 |
-
"""Structured parse results, to provide multiple means of access to
|
29 |
-
the parsed data:
|
30 |
-
|
31 |
-
- as a list (``len(results)``)
|
32 |
-
- by list index (``results[0], results[1]``, etc.)
|
33 |
-
- by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)
|
34 |
-
|
35 |
-
Example::
|
36 |
-
|
37 |
-
integer = Word(nums)
|
38 |
-
date_str = (integer.set_results_name("year") + '/'
|
39 |
-
+ integer.set_results_name("month") + '/'
|
40 |
-
+ integer.set_results_name("day"))
|
41 |
-
# equivalent form:
|
42 |
-
# date_str = (integer("year") + '/'
|
43 |
-
# + integer("month") + '/'
|
44 |
-
# + integer("day"))
|
45 |
-
|
46 |
-
# parse_string returns a ParseResults object
|
47 |
-
result = date_str.parse_string("1999/12/31")
|
48 |
-
|
49 |
-
def test(s, fn=repr):
|
50 |
-
print("{} -> {}".format(s, fn(eval(s))))
|
51 |
-
test("list(result)")
|
52 |
-
test("result[0]")
|
53 |
-
test("result['month']")
|
54 |
-
test("result.day")
|
55 |
-
test("'month' in result")
|
56 |
-
test("'minutes' in result")
|
57 |
-
test("result.dump()", str)
|
58 |
-
|
59 |
-
prints::
|
60 |
-
|
61 |
-
list(result) -> ['1999', '/', '12', '/', '31']
|
62 |
-
result[0] -> '1999'
|
63 |
-
result['month'] -> '12'
|
64 |
-
result.day -> '31'
|
65 |
-
'month' in result -> True
|
66 |
-
'minutes' in result -> False
|
67 |
-
result.dump() -> ['1999', '/', '12', '/', '31']
|
68 |
-
- day: '31'
|
69 |
-
- month: '12'
|
70 |
-
- year: '1999'
|
71 |
-
"""
|
72 |
-
|
73 |
-
_null_values: Tuple[Any, ...] = (None, [], "", ())
|
74 |
-
|
75 |
-
__slots__ = [
|
76 |
-
"_name",
|
77 |
-
"_parent",
|
78 |
-
"_all_names",
|
79 |
-
"_modal",
|
80 |
-
"_toklist",
|
81 |
-
"_tokdict",
|
82 |
-
"__weakref__",
|
83 |
-
]
|
84 |
-
|
85 |
-
class List(list):
|
86 |
-
"""
|
87 |
-
Simple wrapper class to distinguish parsed list results that should be preserved
|
88 |
-
as actual Python lists, instead of being converted to :class:`ParseResults`:
|
89 |
-
|
90 |
-
LBRACK, RBRACK = map(pp.Suppress, "[]")
|
91 |
-
element = pp.Forward()
|
92 |
-
item = ppc.integer
|
93 |
-
element_list = LBRACK + pp.delimited_list(element) + RBRACK
|
94 |
-
|
95 |
-
# add parse actions to convert from ParseResults to actual Python collection types
|
96 |
-
def as_python_list(t):
|
97 |
-
return pp.ParseResults.List(t.as_list())
|
98 |
-
element_list.add_parse_action(as_python_list)
|
99 |
-
|
100 |
-
element <<= item | element_list
|
101 |
-
|
102 |
-
element.run_tests('''
|
103 |
-
100
|
104 |
-
[2,3,4]
|
105 |
-
[[2, 1],3,4]
|
106 |
-
[(2, 1),3,4]
|
107 |
-
(2,3,4)
|
108 |
-
''', post_parse=lambda s, r: (r[0], type(r[0])))
|
109 |
-
|
110 |
-
prints:
|
111 |
-
|
112 |
-
100
|
113 |
-
(100, <class 'int'>)
|
114 |
-
|
115 |
-
[2,3,4]
|
116 |
-
([2, 3, 4], <class 'list'>)
|
117 |
-
|
118 |
-
[[2, 1],3,4]
|
119 |
-
([[2, 1], 3, 4], <class 'list'>)
|
120 |
-
|
121 |
-
(Used internally by :class:`Group` when `aslist=True`.)
|
122 |
-
"""
|
123 |
-
|
124 |
-
def __new__(cls, contained=None):
|
125 |
-
if contained is None:
|
126 |
-
contained = []
|
127 |
-
|
128 |
-
if not isinstance(contained, list):
|
129 |
-
raise TypeError(
|
130 |
-
"{} may only be constructed with a list,"
|
131 |
-
" not {}".format(cls.__name__, type(contained).__name__)
|
132 |
-
)
|
133 |
-
|
134 |
-
return list.__new__(cls)
|
135 |
-
|
136 |
-
def __new__(cls, toklist=None, name=None, **kwargs):
|
137 |
-
if isinstance(toklist, ParseResults):
|
138 |
-
return toklist
|
139 |
-
self = object.__new__(cls)
|
140 |
-
self._name = None
|
141 |
-
self._parent = None
|
142 |
-
self._all_names = set()
|
143 |
-
|
144 |
-
if toklist is None:
|
145 |
-
self._toklist = []
|
146 |
-
elif isinstance(toklist, (list, _generator_type)):
|
147 |
-
self._toklist = (
|
148 |
-
[toklist[:]]
|
149 |
-
if isinstance(toklist, ParseResults.List)
|
150 |
-
else list(toklist)
|
151 |
-
)
|
152 |
-
else:
|
153 |
-
self._toklist = [toklist]
|
154 |
-
self._tokdict = dict()
|
155 |
-
return self
|
156 |
-
|
157 |
-
# Performance tuning: we construct a *lot* of these, so keep this
|
158 |
-
# constructor as small and fast as possible
|
159 |
-
def __init__(
|
160 |
-
self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
|
161 |
-
):
|
162 |
-
self._modal = modal
|
163 |
-
if name is not None and name != "":
|
164 |
-
if isinstance(name, int):
|
165 |
-
name = str(name)
|
166 |
-
if not modal:
|
167 |
-
self._all_names = {name}
|
168 |
-
self._name = name
|
169 |
-
if toklist not in self._null_values:
|
170 |
-
if isinstance(toklist, (str_type, type)):
|
171 |
-
toklist = [toklist]
|
172 |
-
if asList:
|
173 |
-
if isinstance(toklist, ParseResults):
|
174 |
-
self[name] = _ParseResultsWithOffset(
|
175 |
-
ParseResults(toklist._toklist), 0
|
176 |
-
)
|
177 |
-
else:
|
178 |
-
self[name] = _ParseResultsWithOffset(
|
179 |
-
ParseResults(toklist[0]), 0
|
180 |
-
)
|
181 |
-
self[name]._name = name
|
182 |
-
else:
|
183 |
-
try:
|
184 |
-
self[name] = toklist[0]
|
185 |
-
except (KeyError, TypeError, IndexError):
|
186 |
-
if toklist is not self:
|
187 |
-
self[name] = toklist
|
188 |
-
else:
|
189 |
-
self._name = name
|
190 |
-
|
191 |
-
def __getitem__(self, i):
|
192 |
-
if isinstance(i, (int, slice)):
|
193 |
-
return self._toklist[i]
|
194 |
-
else:
|
195 |
-
if i not in self._all_names:
|
196 |
-
return self._tokdict[i][-1][0]
|
197 |
-
else:
|
198 |
-
return ParseResults([v[0] for v in self._tokdict[i]])
|
199 |
-
|
200 |
-
def __setitem__(self, k, v, isinstance=isinstance):
|
201 |
-
if isinstance(v, _ParseResultsWithOffset):
|
202 |
-
self._tokdict[k] = self._tokdict.get(k, list()) + [v]
|
203 |
-
sub = v[0]
|
204 |
-
elif isinstance(k, (int, slice)):
|
205 |
-
self._toklist[k] = v
|
206 |
-
sub = v
|
207 |
-
else:
|
208 |
-
self._tokdict[k] = self._tokdict.get(k, list()) + [
|
209 |
-
_ParseResultsWithOffset(v, 0)
|
210 |
-
]
|
211 |
-
sub = v
|
212 |
-
if isinstance(sub, ParseResults):
|
213 |
-
sub._parent = wkref(self)
|
214 |
-
|
215 |
-
def __delitem__(self, i):
|
216 |
-
if isinstance(i, (int, slice)):
|
217 |
-
mylen = len(self._toklist)
|
218 |
-
del self._toklist[i]
|
219 |
-
|
220 |
-
# convert int to slice
|
221 |
-
if isinstance(i, int):
|
222 |
-
if i < 0:
|
223 |
-
i += mylen
|
224 |
-
i = slice(i, i + 1)
|
225 |
-
# get removed indices
|
226 |
-
removed = list(range(*i.indices(mylen)))
|
227 |
-
removed.reverse()
|
228 |
-
# fixup indices in token dictionary
|
229 |
-
for name, occurrences in self._tokdict.items():
|
230 |
-
for j in removed:
|
231 |
-
for k, (value, position) in enumerate(occurrences):
|
232 |
-
occurrences[k] = _ParseResultsWithOffset(
|
233 |
-
value, position - (position > j)
|
234 |
-
)
|
235 |
-
else:
|
236 |
-
del self._tokdict[i]
|
237 |
-
|
238 |
-
def __contains__(self, k) -> bool:
|
239 |
-
return k in self._tokdict
|
240 |
-
|
241 |
-
def __len__(self) -> int:
|
242 |
-
return len(self._toklist)
|
243 |
-
|
244 |
-
def __bool__(self) -> bool:
|
245 |
-
return not not (self._toklist or self._tokdict)
|
246 |
-
|
247 |
-
def __iter__(self) -> Iterator:
|
248 |
-
return iter(self._toklist)
|
249 |
-
|
250 |
-
def __reversed__(self) -> Iterator:
|
251 |
-
return iter(self._toklist[::-1])
|
252 |
-
|
253 |
-
def keys(self):
|
254 |
-
return iter(self._tokdict)
|
255 |
-
|
256 |
-
def values(self):
|
257 |
-
return (self[k] for k in self.keys())
|
258 |
-
|
259 |
-
def items(self):
|
260 |
-
return ((k, self[k]) for k in self.keys())
|
261 |
-
|
262 |
-
def haskeys(self) -> bool:
|
263 |
-
"""
|
264 |
-
Since ``keys()`` returns an iterator, this method is helpful in bypassing
|
265 |
-
code that looks for the existence of any defined results names."""
|
266 |
-
return bool(self._tokdict)
|
267 |
-
|
268 |
-
def pop(self, *args, **kwargs):
|
269 |
-
"""
|
270 |
-
Removes and returns item at specified index (default= ``last``).
|
271 |
-
Supports both ``list`` and ``dict`` semantics for ``pop()``. If
|
272 |
-
passed no argument or an integer argument, it will use ``list``
|
273 |
-
semantics and pop tokens from the list of parsed tokens. If passed
|
274 |
-
a non-integer argument (most likely a string), it will use ``dict``
|
275 |
-
semantics and pop the corresponding value from any defined results
|
276 |
-
names. A second default return value argument is supported, just as in
|
277 |
-
``dict.pop()``.
|
278 |
-
|
279 |
-
Example::
|
280 |
-
|
281 |
-
numlist = Word(nums)[...]
|
282 |
-
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
|
283 |
-
|
284 |
-
def remove_first(tokens):
|
285 |
-
tokens.pop(0)
|
286 |
-
numlist.add_parse_action(remove_first)
|
287 |
-
print(numlist.parse_string("0 123 321")) # -> ['123', '321']
|
288 |
-
|
289 |
-
label = Word(alphas)
|
290 |
-
patt = label("LABEL") + Word(nums)[1, ...]
|
291 |
-
print(patt.parse_string("AAB 123 321").dump())
|
292 |
-
|
293 |
-
# Use pop() in a parse action to remove named result (note that corresponding value is not
|
294 |
-
# removed from list form of results)
|
295 |
-
def remove_LABEL(tokens):
|
296 |
-
tokens.pop("LABEL")
|
297 |
-
return tokens
|
298 |
-
patt.add_parse_action(remove_LABEL)
|
299 |
-
print(patt.parse_string("AAB 123 321").dump())
|
300 |
-
|
301 |
-
prints::
|
302 |
-
|
303 |
-
['AAB', '123', '321']
|
304 |
-
- LABEL: 'AAB'
|
305 |
-
|
306 |
-
['AAB', '123', '321']
|
307 |
-
"""
|
308 |
-
if not args:
|
309 |
-
args = [-1]
|
310 |
-
for k, v in kwargs.items():
|
311 |
-
if k == "default":
|
312 |
-
args = (args[0], v)
|
313 |
-
else:
|
314 |
-
raise TypeError(
|
315 |
-
"pop() got an unexpected keyword argument {!r}".format(k)
|
316 |
-
)
|
317 |
-
if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
|
318 |
-
index = args[0]
|
319 |
-
ret = self[index]
|
320 |
-
del self[index]
|
321 |
-
return ret
|
322 |
-
else:
|
323 |
-
defaultvalue = args[1]
|
324 |
-
return defaultvalue
|
325 |
-
|
326 |
-
def get(self, key, default_value=None):
|
327 |
-
"""
|
328 |
-
Returns named result matching the given key, or if there is no
|
329 |
-
such name, then returns the given ``default_value`` or ``None`` if no
|
330 |
-
``default_value`` is specified.
|
331 |
-
|
332 |
-
Similar to ``dict.get()``.
|
333 |
-
|
334 |
-
Example::
|
335 |
-
|
336 |
-
integer = Word(nums)
|
337 |
-
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
|
338 |
-
|
339 |
-
result = date_str.parse_string("1999/12/31")
|
340 |
-
print(result.get("year")) # -> '1999'
|
341 |
-
print(result.get("hour", "not specified")) # -> 'not specified'
|
342 |
-
print(result.get("hour")) # -> None
|
343 |
-
"""
|
344 |
-
if key in self:
|
345 |
-
return self[key]
|
346 |
-
else:
|
347 |
-
return default_value
|
348 |
-
|
349 |
-
def insert(self, index, ins_string):
|
350 |
-
"""
|
351 |
-
Inserts new element at location index in the list of parsed tokens.
|
352 |
-
|
353 |
-
Similar to ``list.insert()``.
|
354 |
-
|
355 |
-
Example::
|
356 |
-
|
357 |
-
numlist = Word(nums)[...]
|
358 |
-
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
|
359 |
-
|
360 |
-
# use a parse action to insert the parse location in the front of the parsed results
|
361 |
-
def insert_locn(locn, tokens):
|
362 |
-
tokens.insert(0, locn)
|
363 |
-
numlist.add_parse_action(insert_locn)
|
364 |
-
print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
|
365 |
-
"""
|
366 |
-
self._toklist.insert(index, ins_string)
|
367 |
-
# fixup indices in token dictionary
|
368 |
-
for name, occurrences in self._tokdict.items():
|
369 |
-
for k, (value, position) in enumerate(occurrences):
|
370 |
-
occurrences[k] = _ParseResultsWithOffset(
|
371 |
-
value, position + (position > index)
|
372 |
-
)
|
373 |
-
|
374 |
-
def append(self, item):
|
375 |
-
"""
|
376 |
-
Add single element to end of ``ParseResults`` list of elements.
|
377 |
-
|
378 |
-
Example::
|
379 |
-
|
380 |
-
numlist = Word(nums)[...]
|
381 |
-
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
|
382 |
-
|
383 |
-
# use a parse action to compute the sum of the parsed integers, and add it to the end
|
384 |
-
def append_sum(tokens):
|
385 |
-
tokens.append(sum(map(int, tokens)))
|
386 |
-
numlist.add_parse_action(append_sum)
|
387 |
-
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
|
388 |
-
"""
|
389 |
-
self._toklist.append(item)
|
390 |
-
|
391 |
-
def extend(self, itemseq):
|
392 |
-
"""
|
393 |
-
Add sequence of elements to end of ``ParseResults`` list of elements.
|
394 |
-
|
395 |
-
Example::
|
396 |
-
|
397 |
-
patt = Word(alphas)[1, ...]
|
398 |
-
|
399 |
-
# use a parse action to append the reverse of the matched strings, to make a palindrome
|
400 |
-
def make_palindrome(tokens):
|
401 |
-
tokens.extend(reversed([t[::-1] for t in tokens]))
|
402 |
-
return ''.join(tokens)
|
403 |
-
patt.add_parse_action(make_palindrome)
|
404 |
-
print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
|
405 |
-
"""
|
406 |
-
if isinstance(itemseq, ParseResults):
|
407 |
-
self.__iadd__(itemseq)
|
408 |
-
else:
|
409 |
-
self._toklist.extend(itemseq)
|
410 |
-
|
411 |
-
def clear(self):
|
412 |
-
"""
|
413 |
-
Clear all elements and results names.
|
414 |
-
"""
|
415 |
-
del self._toklist[:]
|
416 |
-
self._tokdict.clear()
|
417 |
-
|
418 |
-
def __getattr__(self, name):
|
419 |
-
try:
|
420 |
-
return self[name]
|
421 |
-
except KeyError:
|
422 |
-
if name.startswith("__"):
|
423 |
-
raise AttributeError(name)
|
424 |
-
return ""
|
425 |
-
|
426 |
-
def __add__(self, other) -> "ParseResults":
|
427 |
-
ret = self.copy()
|
428 |
-
ret += other
|
429 |
-
return ret
|
430 |
-
|
431 |
-
def __iadd__(self, other) -> "ParseResults":
|
432 |
-
if other._tokdict:
|
433 |
-
offset = len(self._toklist)
|
434 |
-
addoffset = lambda a: offset if a < 0 else a + offset
|
435 |
-
otheritems = other._tokdict.items()
|
436 |
-
otherdictitems = [
|
437 |
-
(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
|
438 |
-
for k, vlist in otheritems
|
439 |
-
for v in vlist
|
440 |
-
]
|
441 |
-
for k, v in otherdictitems:
|
442 |
-
self[k] = v
|
443 |
-
if isinstance(v[0], ParseResults):
|
444 |
-
v[0]._parent = wkref(self)
|
445 |
-
|
446 |
-
self._toklist += other._toklist
|
447 |
-
self._all_names |= other._all_names
|
448 |
-
return self
|
449 |
-
|
450 |
-
def __radd__(self, other) -> "ParseResults":
|
451 |
-
if isinstance(other, int) and other == 0:
|
452 |
-
# useful for merging many ParseResults using sum() builtin
|
453 |
-
return self.copy()
|
454 |
-
else:
|
455 |
-
# this may raise a TypeError - so be it
|
456 |
-
return other + self
|
457 |
-
|
458 |
-
def __repr__(self) -> str:
|
459 |
-
return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict())
|
460 |
-
|
461 |
-
def __str__(self) -> str:
|
462 |
-
return (
|
463 |
-
"["
|
464 |
-
+ ", ".join(
|
465 |
-
[
|
466 |
-
str(i) if isinstance(i, ParseResults) else repr(i)
|
467 |
-
for i in self._toklist
|
468 |
-
]
|
469 |
-
)
|
470 |
-
+ "]"
|
471 |
-
)
|
472 |
-
|
473 |
-
def _asStringList(self, sep=""):
|
474 |
-
out = []
|
475 |
-
for item in self._toklist:
|
476 |
-
if out and sep:
|
477 |
-
out.append(sep)
|
478 |
-
if isinstance(item, ParseResults):
|
479 |
-
out += item._asStringList()
|
480 |
-
else:
|
481 |
-
out.append(str(item))
|
482 |
-
return out
|
483 |
-
|
484 |
-
def as_list(self) -> list:
|
485 |
-
"""
|
486 |
-
Returns the parse results as a nested list of matching tokens, all converted to strings.
|
487 |
-
|
488 |
-
Example::
|
489 |
-
|
490 |
-
patt = Word(alphas)[1, ...]
|
491 |
-
result = patt.parse_string("sldkj lsdkj sldkj")
|
492 |
-
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
|
493 |
-
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
|
494 |
-
|
495 |
-
# Use as_list() to create an actual list
|
496 |
-
result_list = result.as_list()
|
497 |
-
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
|
498 |
-
"""
|
499 |
-
return [
|
500 |
-
res.as_list() if isinstance(res, ParseResults) else res
|
501 |
-
for res in self._toklist
|
502 |
-
]
|
503 |
-
|
504 |
-
def as_dict(self) -> dict:
|
505 |
-
"""
|
506 |
-
Returns the named parse results as a nested dictionary.
|
507 |
-
|
508 |
-
Example::
|
509 |
-
|
510 |
-
integer = Word(nums)
|
511 |
-
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
|
512 |
-
|
513 |
-
result = date_str.parse_string('12/31/1999')
|
514 |
-
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
|
515 |
-
|
516 |
-
result_dict = result.as_dict()
|
517 |
-
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
|
518 |
-
|
519 |
-
# even though a ParseResults supports dict-like access, sometime you just need to have a dict
|
520 |
-
import json
|
521 |
-
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
|
522 |
-
print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
|
523 |
-
"""
|
524 |
-
|
525 |
-
def to_item(obj):
|
526 |
-
if isinstance(obj, ParseResults):
|
527 |
-
return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
|
528 |
-
else:
|
529 |
-
return obj
|
530 |
-
|
531 |
-
return dict((k, to_item(v)) for k, v in self.items())
|
532 |
-
|
533 |
-
def copy(self) -> "ParseResults":
|
534 |
-
"""
|
535 |
-
Returns a new copy of a :class:`ParseResults` object.
|
536 |
-
"""
|
537 |
-
ret = ParseResults(self._toklist)
|
538 |
-
ret._tokdict = self._tokdict.copy()
|
539 |
-
ret._parent = self._parent
|
540 |
-
ret._all_names |= self._all_names
|
541 |
-
ret._name = self._name
|
542 |
-
return ret
|
543 |
-
|
544 |
-
def get_name(self):
|
545 |
-
r"""
|
546 |
-
        Returns the results name for this token expression. Useful when several
        different expressions might match at a particular location.

        Example::

            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number")
                        | Group(ssn_expr)("ssn")
                        | Group(integer)("age"))
            user_info = user_data[1, ...]

            result = user_info.parse_string("22 111-22-3333 #221B")
            for item in result:
                print(item.get_name(), ':', item[0])

        prints::

            age : 22
            ssn : 111-22-3333
            house_number : 221B
        """
        if self._name:
            return self._name
        elif self._parent:
            par = self._parent()

            def find_in_parent(sub):
                return next(
                    (
                        k
                        for k, vlist in par._tokdict.items()
                        for v, loc in vlist
                        if sub is v
                    ),
                    None,
                )

            return find_in_parent(self) if par else None
        elif (
            len(self) == 1
            and len(self._tokdict) == 1
            and next(iter(self._tokdict.values()))[0][1] in (0, -1)
        ):
            return next(iter(self._tokdict.keys()))
        else:
            return None

    def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
        """
        Diagnostic method for listing out the contents of
        a :class:`ParseResults`. Accepts an optional ``indent`` argument so
        that this string can be embedded in a nested display of other data.

        Example::

            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parse_string('1999/12/31')
            print(result.dump())

        prints::

            ['1999', '/', '12', '/', '31']
            - day: '31'
            - month: '12'
            - year: '1999'
        """
        out = []
        NL = "\n"
        out.append(indent + str(self.as_list()) if include_list else "")

        if full:
            if self.haskeys():
                items = sorted((str(k), v) for k, v in self.items())
                for k, v in items:
                    if out:
                        out.append(NL)
                    out.append("{}{}- {}: ".format(indent, ("  " * _depth), k))
                    if isinstance(v, ParseResults):
                        if v:
                            out.append(
                                v.dump(
                                    indent=indent,
                                    full=full,
                                    include_list=include_list,
                                    _depth=_depth + 1,
                                )
                            )
                        else:
                            out.append(str(v))
                    else:
                        out.append(repr(v))
            if any(isinstance(vv, ParseResults) for vv in self):
                v = self
                for i, vv in enumerate(v):
                    if isinstance(vv, ParseResults):
                        out.append(
                            "\n{}{}[{}]:\n{}{}{}".format(
                                indent,
                                ("  " * (_depth)),
                                i,
                                indent,
                                ("  " * (_depth + 1)),
                                vv.dump(
                                    indent=indent,
                                    full=full,
                                    include_list=include_list,
                                    _depth=_depth + 1,
                                ),
                            )
                        )
                    else:
                        out.append(
                            "\n%s%s[%d]:\n%s%s%s"
                            % (
                                indent,
                                ("  " * (_depth)),
                                i,
                                indent,
                                ("  " * (_depth + 1)),
                                str(vv),
                            )
                        )

        return "".join(out)

    def pprint(self, *args, **kwargs):
        """
        Pretty-printer for parsed results as a list, using the
        `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
        Accepts additional positional or keyword args as defined for
        `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .

        Example::

            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(delimited_list(term)))
            result = func.parse_string("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)

        prints::

            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        """
        pprint.pprint(self.as_list(), *args, **kwargs)

    # add support for pickle protocol
    def __getstate__(self):
        return (
            self._toklist,
            (
                self._tokdict.copy(),
                self._parent is not None and self._parent() or None,
                self._all_names,
                self._name,
            ),
        )

    def __setstate__(self, state):
        self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
        self._all_names = set(inAccumNames)
        if par is not None:
            self._parent = wkref(par)
        else:
            self._parent = None

    def __getnewargs__(self):
        return self._toklist, self._name

    def __dir__(self):
        return dir(type(self)) + list(self.keys())

    @classmethod
    def from_dict(cls, other, name=None) -> "ParseResults":
        """
        Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
        name-value relations as results names. If an optional ``name`` argument is
        given, a nested ``ParseResults`` will be returned.
        """

        def is_iterable(obj):
            try:
                iter(obj)
            except Exception:
                return False
            else:
                return not isinstance(obj, str_type)

        ret = cls([])
        for k, v in other.items():
            if isinstance(v, Mapping):
                ret += cls.from_dict(v, name=k)
            else:
                ret += cls([v], name=k, asList=is_iterable(v))
        if name is not None:
            ret = cls([ret], name=name)
        return ret

    asList = as_list
    asDict = as_dict
    getName = get_name


MutableMapping.register(ParseResults)
MutableSequence.register(ParseResults)
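The `from_dict` classmethod above is the only helper in this block whose docstring carries no usage example. The following is a minimal sketch, not part of the deleted file, assuming the standalone `pyparsing` package (whose `ParseResults` matches this vendored copy); the dict contents and the results name `person` are illustrative placeholders.

# Minimal sketch, assuming the standalone pyparsing package is installed;
# its ParseResults mirrors the vendored module reconstructed above.
from pyparsing import ParseResults

data = {"user": "alice", "address": {"city": "Metropolis", "zip": "12345"}}
person = ParseResults.from_dict(data, name="person")

# Dict keys become results names; nested dicts become nested ParseResults.
print(person.get_name())                     # -> 'person'
print(person["person"]["user"])              # -> 'alice'
print(person["person"]["address"]["city"])   # -> 'Metropolis'
print(person.dump())                         # diagnostic listing of names and values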
spaces/AzizR/FaceRecognitionGradio/app.py
DELETED
@@ -1,228 +0,0 @@
from fastcore.all import *
from fastai.vision.all import *

import pathlib
plt = platform.system()
if plt == 'Linux': pathlib.WindowsPath = pathlib.PosixPath

learn = load_learner('./fast_ai_model_resnet18_new_labeling.pth')


import cv2
import torch
import torchvision.transforms as tt
import numpy as np
# from facenet_pytorch import MTCNN
from PIL import Image
import numpy as np

from retinaface import RetinaFace




device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

def load_checkpoint(filepath):
    checkpoint = torch.load(filepath, device)
    model = checkpoint['model']
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    for parameter in model.parameters():
        parameter.requires_grad = False

    model.eval()
    return model


# filepath = './models/9.pth'
# loaded_model = load_checkpoint(filepath)

loaded_model = learn

class FaceDetector(object):
    """
    Face detector class
    """

    def __init__(self, detector, loaded_model, image=None):
        self.detector = detector
        self.loaded_model = loaded_model
        self.image = image

    def _draw(self, frame, boxes, probs, landmarks):
        """
        Draw landmarks and boxes for each face detected
        """
        try:
            for box, prob, ld in zip(boxes, probs, landmarks):
                # Draw rectangle on frame
                box = box.astype('int')
                ld = ld.astype('int')
                cv2.rectangle(frame,
                              (box[0], box[1]),
                              (box[2], box[3]),
                              (0, 0, 255),
                              thickness=2)

                # Show probability
                cv2.putText(frame, str(
                    prob), (box[2], box[3]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)

                # Draw landmarks
                cv2.circle(frame, tuple(ld[0]), 5, (0, 0, 255), -1)
                cv2.circle(frame, tuple(ld[1]), 5, (0, 0, 255), -1)
                cv2.circle(frame, tuple(ld[2]), 5, (0, 0, 255), -1)
                cv2.circle(frame, tuple(ld[3]), 5, (0, 0, 255), -1)
                cv2.circle(frame, tuple(ld[4]), 5, (0, 0, 255), -1)
        except Exception as e:
            # print(e)
            pass

        return frame

    # def _capture(self,frame,boxes,probs,landmarks):
    #     sampleNum = 0
    #     while True:
    #         try:
    #             sampleNum = sampleNum+1
    #             boxes = boxes.astype('int')
    #             face = frame[boxes[0,1]:boxes[0,3],boxes[0,0]:boxes[0,2]]
    #             cv2.imwrite(str(sampleNum) + ".jpg",face)
    #         except Exception as e:
    #             # print(e)
    #             pass
    #         if sampleNum >2:
    #             break


    def _recognize(self, loaded_model, frame, boxes):
        try:

            for box in boxes:
                # box = box.astype('int')
                # face = frame[box[0][1]:box[0][3],box[0][0]:box[0][2]]
                face = frame[box[1]:box[3], box[0]:box[2]]
                # pil_image = Image.fromarray(face, mode="RGB")

                labels, _, probs = loaded_model.predict(face)
                # label= 1

                label = labels if (probs[np.argmax(probs)] > 0.8) else "Unknown"
                probs = probs[np.argmax(probs)] if (probs[np.argmax(probs)] > 0.8) else np.nan

                # if prediction == 0:
                #     label = "Fadli"
                # elif prediction == 1:
                #     label = "Aziz"
                # elif prediction == 2:
                #     label = "Eka"
                # else:
                #     label = "Unknown"
                _ = "" if probs == np.nan else ": {:.2f}".format(float(probs))

                cv2.putText(frame, label + _, (box[2], box[3]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)

                # cv2.putText(frame,str(["{:.2f}".format(x)for x in probs]), (box[2]-100, box[3]+30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2, cv2.LINE_AA)

                # cv2.putText(frame,"{:.2f}".format(float(probs[np.argmax(probs)])), (box[2]-100, box[3]+30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2, cv2.LINE_AA)

                cv2.rectangle(frame,
                              (box[0], box[1]),
                              (box[2], box[3]),
                              (0, 0, 255),
                              thickness=2)
            # cv2.putText(frame, str(
            #     prediction), (box[0][2], box[0][3]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            return frame
        except Exception as e:
            print(e, 'eaea')
            pass

    def run_on_image(self, image):
        frame = image
        try:
            # detect face box, probability and landmarks
            json_face = self.detector.detect_faces(frame)

            # boxes = [json_face['face_{}'.format(x)]['facial_area'] for x in range(1,len(json_face)+1)]
            # probs = ["{:.2f}".format(json_face['face_{}'.format(x)]['score']) for x in range(1,len(json_face)+1)]
            # landmarks = [json_face['face_{}'.format(x)]['landmarks'] for x in range(1,len(json_face)+1)]

            boxes = []
            probs = []
            landmarks = []
            for key, value in json_face.items():
                boxes.append(value['facial_area'])
                probs.append("{:.2f}".format(value['score']))
                landmarks.append(value['landmarks'])

            return Image.fromarray(self._recognize(loaded_model, frame, boxes))
            # self._draw(frame, boxes, probs, landmarks)

        except Exception as e:
            print(e)
            pass


    def run(self):
        """
        Run the FaceDetector and draw landmarks and boxes around detected faces
        """
        cap = cv2.VideoCapture(0)

        while True:
            ret, frame = cap.read()
            try:
                # detect face box, probability and landmarks
                json_face = self.detector.detect_faces(frame)

                # boxes = [json_face['face_{}'.format(x)]['facial_area'] for x in range(1,len(json_face)+1)]
                # probs = ["{:.2f}".format(json_face['face_{}'.format(x)]['score']) for x in range(1,len(json_face)+1)]
                # landmarks = [json_face['face_{}'.format(x)]['landmarks'] for x in range(1,len(json_face)+1)]

                boxes = []
                probs = []
                landmarks = []
                for key, value in json_face.items():
                    boxes.append(value['facial_area'])
                    probs.append("{:.2f}".format(value['score']))
                    landmarks.append(value['landmarks'])

                self._recognize(loaded_model, frame, boxes)
                # self._draw(frame, boxes, probs, landmarks)

            except Exception as e:
                print(e)
                pass

            # Show the frame
            cv2.imshow('Face Detection', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()


# Run the app

# fcd = FaceDetector(RetinaFace,loaded_model)
detector = RetinaFace
# fcd = FaceDetector(detector,loaded_model)
# fcd.run()

fcd = FaceDetector(detector, loaded_model)

import gradio as gr

gr.Interface(fn=fcd.run_on_image,
             inputs=gr.Image(),
             outputs=gr.Image(),
             ).launch()
spaces/AzumaSeren100/XuanShen-Bert-VITS2/attentions.py
DELETED
@@ -1,344 +0,0 @@
import copy
import math
import torch
from torch import nn
from torch.nn import functional as F

import commons
import logging

logger = logging.getLogger(__name__)

class LayerNorm(nn.Module):
  def __init__(self, channels, eps=1e-5):
    super().__init__()
    self.channels = channels
    self.eps = eps

    self.gamma = nn.Parameter(torch.ones(channels))
    self.beta = nn.Parameter(torch.zeros(channels))

  def forward(self, x):
    x = x.transpose(1, -1)
    x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
    return x.transpose(1, -1)



@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
  n_channels_int = n_channels[0]
  in_act = input_a + input_b
  t_act = torch.tanh(in_act[:, :n_channels_int, :])
  s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
  acts = t_act * s_act
  return acts

class Encoder(nn.Module):
  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs):
    super().__init__()
    self.hidden_channels = hidden_channels
    self.filter_channels = filter_channels
    self.n_heads = n_heads
    self.n_layers = n_layers
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout
    self.window_size = window_size
    #if isflow:
    #  cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)
    #  self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)
    #  self.cond_layer = weight_norm(cond_layer, name='weight')
    #  self.gin_channels = 256
    self.cond_layer_idx = self.n_layers
    if 'gin_channels' in kwargs:
      self.gin_channels = kwargs['gin_channels']
      if self.gin_channels != 0:
        self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
        # vits2 says 3rd block, so idx is 2 by default
        self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2
        logging.debug(self.gin_channels, self.cond_layer_idx)
        assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers'
    self.drop = nn.Dropout(p_dropout)
    self.attn_layers = nn.ModuleList()
    self.norm_layers_1 = nn.ModuleList()
    self.ffn_layers = nn.ModuleList()
    self.norm_layers_2 = nn.ModuleList()
    for i in range(self.n_layers):
      self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
      self.norm_layers_1.append(LayerNorm(hidden_channels))
      self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
      self.norm_layers_2.append(LayerNorm(hidden_channels))
  def forward(self, x, x_mask, g=None):
    attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
    x = x * x_mask
    for i in range(self.n_layers):
      if i == self.cond_layer_idx and g is not None:
        g = self.spk_emb_linear(g.transpose(1, 2))
        g = g.transpose(1, 2)
        x = x + g
        x = x * x_mask
      y = self.attn_layers[i](x, x, attn_mask)
      y = self.drop(y)
      x = self.norm_layers_1[i](x + y)

      y = self.ffn_layers[i](x, x_mask)
      y = self.drop(y)
      x = self.norm_layers_2[i](x + y)
    x = x * x_mask
    return x


class Decoder(nn.Module):
  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
    super().__init__()
    self.hidden_channels = hidden_channels
    self.filter_channels = filter_channels
    self.n_heads = n_heads
    self.n_layers = n_layers
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout
    self.proximal_bias = proximal_bias
    self.proximal_init = proximal_init

    self.drop = nn.Dropout(p_dropout)
    self.self_attn_layers = nn.ModuleList()
    self.norm_layers_0 = nn.ModuleList()
    self.encdec_attn_layers = nn.ModuleList()
    self.norm_layers_1 = nn.ModuleList()
    self.ffn_layers = nn.ModuleList()
    self.norm_layers_2 = nn.ModuleList()
    for i in range(self.n_layers):
      self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
      self.norm_layers_0.append(LayerNorm(hidden_channels))
      self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
      self.norm_layers_1.append(LayerNorm(hidden_channels))
      self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
      self.norm_layers_2.append(LayerNorm(hidden_channels))

  def forward(self, x, x_mask, h, h_mask):
    """
    x: decoder input
    h: encoder output
    """
    self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
    encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
    x = x * x_mask
    for i in range(self.n_layers):
      y = self.self_attn_layers[i](x, x, self_attn_mask)
      y = self.drop(y)
      x = self.norm_layers_0[i](x + y)

      y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
      y = self.drop(y)
      x = self.norm_layers_1[i](x + y)

      y = self.ffn_layers[i](x, x_mask)
      y = self.drop(y)
      x = self.norm_layers_2[i](x + y)
    x = x * x_mask
    return x


class MultiHeadAttention(nn.Module):
  def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
    super().__init__()
    assert channels % n_heads == 0

    self.channels = channels
    self.out_channels = out_channels
    self.n_heads = n_heads
    self.p_dropout = p_dropout
    self.window_size = window_size
    self.heads_share = heads_share
    self.block_length = block_length
    self.proximal_bias = proximal_bias
    self.proximal_init = proximal_init
    self.attn = None

    self.k_channels = channels // n_heads
    self.conv_q = nn.Conv1d(channels, channels, 1)
    self.conv_k = nn.Conv1d(channels, channels, 1)
    self.conv_v = nn.Conv1d(channels, channels, 1)
    self.conv_o = nn.Conv1d(channels, out_channels, 1)
    self.drop = nn.Dropout(p_dropout)

    if window_size is not None:
      n_heads_rel = 1 if heads_share else n_heads
      rel_stddev = self.k_channels**-0.5
      self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
      self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

    nn.init.xavier_uniform_(self.conv_q.weight)
    nn.init.xavier_uniform_(self.conv_k.weight)
    nn.init.xavier_uniform_(self.conv_v.weight)
    if proximal_init:
      with torch.no_grad():
        self.conv_k.weight.copy_(self.conv_q.weight)
        self.conv_k.bias.copy_(self.conv_q.bias)

  def forward(self, x, c, attn_mask=None):
    q = self.conv_q(x)
    k = self.conv_k(c)
    v = self.conv_v(c)

    x, self.attn = self.attention(q, k, v, mask=attn_mask)

    x = self.conv_o(x)
    return x

  def attention(self, query, key, value, mask=None):
    # reshape [b, d, t] -> [b, n_h, t, d_k]
    b, d, t_s, t_t = (*key.size(), query.size(2))
    query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
    key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
    value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

    scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
    if self.window_size is not None:
      assert t_s == t_t, "Relative attention is only available for self-attention."
      key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
      rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
      scores_local = self._relative_position_to_absolute_position(rel_logits)
      scores = scores + scores_local
    if self.proximal_bias:
      assert t_s == t_t, "Proximal bias is only available for self-attention."
      scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
    if mask is not None:
      scores = scores.masked_fill(mask == 0, -1e4)
      if self.block_length is not None:
        assert t_s == t_t, "Local attention is only available for self-attention."
        block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
        scores = scores.masked_fill(block_mask == 0, -1e4)
    p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
    p_attn = self.drop(p_attn)
    output = torch.matmul(p_attn, value)
    if self.window_size is not None:
      relative_weights = self._absolute_position_to_relative_position(p_attn)
      value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
      output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
    output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
    return output, p_attn

  def _matmul_with_relative_values(self, x, y):
    """
    x: [b, h, l, m]
    y: [h or 1, m, d]
    ret: [b, h, l, d]
    """
    ret = torch.matmul(x, y.unsqueeze(0))
    return ret

  def _matmul_with_relative_keys(self, x, y):
    """
    x: [b, h, l, d]
    y: [h or 1, m, d]
    ret: [b, h, l, m]
    """
    ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
    return ret

  def _get_relative_embeddings(self, relative_embeddings, length):
    max_relative_position = 2 * self.window_size + 1
    # Pad first before slice to avoid using cond ops.
    pad_length = max(length - (self.window_size + 1), 0)
    slice_start_position = max((self.window_size + 1) - length, 0)
    slice_end_position = slice_start_position + 2 * length - 1
    if pad_length > 0:
      padded_relative_embeddings = F.pad(
          relative_embeddings,
          commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
    else:
      padded_relative_embeddings = relative_embeddings
    used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
    return used_relative_embeddings

  def _relative_position_to_absolute_position(self, x):
    """
    x: [b, h, l, 2*l-1]
    ret: [b, h, l, l]
    """
    batch, heads, length, _ = x.size()
    # Concat columns of pad to shift from relative to absolute indexing.
    x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))

    # Concat extra elements so to add up to shape (len+1, 2*len-1).
    x_flat = x.view([batch, heads, length * 2 * length])
    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))

    # Reshape and slice out the padded elements.
    x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
    return x_final

  def _absolute_position_to_relative_position(self, x):
    """
    x: [b, h, l, l]
    ret: [b, h, l, 2*l-1]
    """
    batch, heads, length, _ = x.size()
    # padd along column
    x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
    x_flat = x.view([batch, heads, length**2 + length*(length -1)])
    # add 0's in the beginning that will skew the elements after reshape
    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
    x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
    return x_final

  def _attention_bias_proximal(self, length):
    """Bias for self-attention to encourage attention to close positions.
    Args:
      length: an integer scalar.
    Returns:
      a Tensor with shape [1, 1, length, length]
    """
    r = torch.arange(length, dtype=torch.float32)
    diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
    return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
  def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
    super().__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.filter_channels = filter_channels
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout
    self.activation = activation
    self.causal = causal

    if causal:
      self.padding = self._causal_padding
    else:
      self.padding = self._same_padding

    self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
    self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
    self.drop = nn.Dropout(p_dropout)

  def forward(self, x, x_mask):
    x = self.conv_1(self.padding(x * x_mask))
    if self.activation == "gelu":
      x = x * torch.sigmoid(1.702 * x)
    else:
      x = torch.relu(x)
    x = self.drop(x)
    x = self.conv_2(self.padding(x * x_mask))
    return x * x_mask

  def _causal_padding(self, x):
    if self.kernel_size == 1:
      return x
    pad_l = self.kernel_size - 1
    pad_r = 0
    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
    x = F.pad(x, commons.convert_pad_shape(padding))
    return x

  def _same_padding(self, x):
    if self.kernel_size == 1:
      return x
    pad_l = (self.kernel_size - 1) // 2
    pad_r = self.kernel_size // 2
    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
    x = F.pad(x, commons.convert_pad_shape(padding))
    return x
spaces/BAAI/AltDiffusion/ui_functions.py
DELETED
@@ -1,240 +0,0 @@
import re
import gradio as gr
from PIL import Image, ImageFont, ImageDraw, ImageFilter, ImageOps
from io import BytesIO
import base64
import re

def change_img_choices(sample_size):
    choices = []
    for i in range(int(sample_size)):
        choices.append(
            '图片{}(img{})'.format(i+1,i+1)
        )
    update_choices = gr.update(choices=choices)
    return update_choices

def change_image_editor_mode(choice, cropped_image, masked_image, resize_mode, width, height):
    if choice == "Mask":
        update_image_result = update_image_mask(cropped_image, resize_mode, width, height)
        return [gr.update(visible=False), update_image_result, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)]

    update_image_result = update_image_mask(masked_image["image"] if masked_image is not None else None, resize_mode, width, height)
    return [update_image_result, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)]

def update_image_mask(cropped_image, resize_mode, width, height):
    resized_cropped_image = resize_image(resize_mode, cropped_image, width, height) if cropped_image else None
    return gr.update(value=resized_cropped_image, visible=True)

def toggle_options_gfpgan(selection):
    if 0 in selection:
        return gr.update(visible=True)
    else:
        return gr.update(visible=False)

def toggle_options_upscalers(selection):
    if 1 in selection:
        return gr.update(visible=True)
    else:
        return gr.update(visible=False)

def toggle_options_realesrgan(selection):
    if selection == 0 or selection == 1 or selection == 3:
        return gr.update(visible=True)
    else:
        return gr.update(visible=False)

def toggle_options_gobig(selection):
    if selection == 1:
        #print(selection)
        return gr.update(visible=True)
    if selection == 3:
        return gr.update(visible=True)
    else:
        return gr.update(visible=False)

def toggle_options_ldsr(selection):
    if selection == 2 or selection == 3:
        return gr.update(visible=True)
    else:
        return gr.update(visible=False)

def increment_down(value):
    return value - 1

def increment_up(value):
    return value + 1

def copy_img_to_lab(img):
    try:
        image_data = re.sub('^data:image/.+;base64,', '', img)
        processed_image = Image.open(BytesIO(base64.b64decode(image_data)))
        tab_update = gr.update(selected='imgproc_tab')
        img_update = gr.update(value=processed_image)
        return processed_image, tab_update,
    except IndexError:
        return [None, None]
def copy_img_params_to_lab(params):
    try:
        prompt = params[0][0].replace('\n', ' ').replace('\r', '')
        seed = int(params[1][1])
        steps = int(params[7][1])
        cfg_scale = float(params[9][1])
        sampler = params[11][1]
        return prompt,seed,steps,cfg_scale,sampler
    except IndexError:
        return [None, None]
def copy_img_to_input(img, idx):
    try:
        # print(img)
        # print("=============")
        # print("The img type is:{}".format(type(img[0])))
        idx_map = {
            "图片1(img1)":0,
            "图片2(img2)":1,
            "图片3(img3)":2,
            "图片4(img4)":3,
        }
        idx = idx_map[idx]
        assert img[idx]['is_file']
        processed_image = Image.open(img[idx]['name'])
        tab_update = gr.update(selected='img2img_tab')
        move_prompt_zh_update = gr.update(visible=True)
        move_prompt_en_update = gr.update(visible=True)
        prompt_update = gr.update(visible=True)
        return tab_update, processed_image, move_prompt_zh_update, move_prompt_en_update, prompt_update
    except IndexError as e:
        raise gr.Error(e)
        return [None, None, None, None, None]

def copy_img_to_edit(img):
    try:
        image_data = re.sub('^data:image/.+;base64,', '', img)
        processed_image = Image.open(BytesIO(base64.b64decode(image_data)))
        tab_update = gr.update(selected='img2img_tab')
        img_update = gr.update(value=processed_image)
        mode_update = gr.update(value='Crop')
        return processed_image, tab_update, mode_update
    except IndexError:
        return [None, None]

def copy_img_to_mask(img):
    try:
        image_data = re.sub('^data:image/.+;base64,', '', img)
        processed_image = Image.open(BytesIO(base64.b64decode(image_data)))
        tab_update = gr.update(selected='img2img_tab')
        img_update = gr.update(value=processed_image)
        mode_update = gr.update(value='Mask')
        return processed_image, tab_update, mode_update
    except IndexError:
        return [None, None]



def copy_img_to_upscale_esrgan(img):
    tabs_update = gr.update(selected='realesrgan_tab')
    image_data = re.sub('^data:image/.+;base64,', '', img)
    processed_image = Image.open(BytesIO(base64.b64decode(image_data)))
    return processed_image, tabs_update


help_text = """
## Mask/Crop
* Masking is not inpainting. You will probably get better results manually masking your images in photoshop instead.
* Built-in masking/cropping is very temperamental.
* It may take some time for the image to show when switching from Crop to Mask.
* If the image doesn't appear after switching to Mask, switch back to Crop and then back again to Mask
* If the mask appears distorted (the brush is weirdly shaped instead of round), switch back to Crop and then back again to Mask.

## Advanced Editor
* Click 💾 Save to send your editor changes to the img2img workflow
* Click ❌ Clear to discard your editor changes

If anything breaks, try switching modes again, switch tabs, clear the image, or reload.
"""

def resize_image(resize_mode, im, width, height):
    LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
    if resize_mode == 0:
        res = im.resize((width, height), resample=LANCZOS)
    elif resize_mode == 1:
        ratio = width / height
        src_ratio = im.width / im.height

        src_w = width if ratio > src_ratio else im.width * height // im.height
        src_h = height if ratio <= src_ratio else im.height * width // im.width

        resized = im.resize((src_w, src_h), resample=LANCZOS)
        res = Image.new("RGBA", (width, height))
        res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
    else:
        ratio = width / height
        src_ratio = im.width / im.height

        src_w = width if ratio < src_ratio else im.width * height // im.height
        src_h = height if ratio >= src_ratio else im.height * width // im.width

        resized = im.resize((src_w, src_h), resample=LANCZOS)
        res = Image.new("RGBA", (width, height))
        res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))

        if ratio < src_ratio:
            fill_height = height // 2 - src_h // 2
            res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
            res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
        elif ratio > src_ratio:
            fill_width = width // 2 - src_w // 2
            res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
            res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))

    return res

def update_dimensions_info(width, height):
    pixel_count_formated = "{:,.0f}".format(width * height)
    return f"Aspect ratio: {round(width / height, 5)}\nTotal pixel count: {pixel_count_formated}"

def get_png_nfo( image: Image ):
    info_text = ""
    visible = bool(image and any(image.info))
    if visible:
        for key,value in image.info.items():
            info_text += f"{key}: {value}\n"
        info_text = info_text.rstrip('\n')
    return gr.Textbox.update(value=info_text, visible=visible)

def load_settings(*values):
    new_settings, key_names, checkboxgroup_info = values[-3:]
    values = list(values[:-3])

    if new_settings:
        if type(new_settings) is str:
            if os.path.exists(new_settings):
                with open(new_settings, "r", encoding="utf8") as f:
                    new_settings = yaml.safe_load(f)
            elif new_settings.startswith("file://") and os.path.exists(new_settings[7:]):
                with open(new_settings[7:], "r", encoding="utf8") as f:
                    new_settings = yaml.safe_load(f)
            else:
                new_settings = yaml.safe_load(new_settings)
        if type(new_settings) is not dict:
            new_settings = {"prompt": new_settings}
        if "txt2img" in new_settings:
            new_settings = new_settings["txt2img"]
        target = new_settings.pop("target", "txt2img")
        if target != "txt2img":
            print(f"Warning: applying settings to txt2img even though {target} is specified as target.", file=sys.stderr)

        skipped_settings = {}
        for key in new_settings.keys():
            if key in key_names:
                values[key_names.index(key)] = new_settings[key]
            else:
                skipped_settings[key] = new_settings[key]
        if skipped_settings:
            print(f"Settings could not be applied: {skipped_settings}", file=sys.stderr)

    # Convert lists of checkbox indices to lists of checkbox labels:
    for (cbg_index, cbg_choices) in checkboxgroup_info:
        values[cbg_index] = [cbg_choices[i] for i in values[cbg_index]]

    return values
spaces/BFH/BKMotionsAI/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: BKMotionsAI
emoji: 📉
colorFrom: yellow
colorTo: blue
sdk: gradio
sdk_version: 3.0.15
app_file: app.py
pinned: false
license: gpl-3.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/BMukhtar/BookRecognitionKz/app.py
DELETED
@@ -1,63 +0,0 @@
import streamlit as st
from PIL import Image
import os
import easyocr
import numpy as np
import fitz  # PyMuPDF
import io
from pdf2image import convert_from_bytes

models_dir = "./models"
output_dir = "./output"
dirs = [models_dir, output_dir]
for d in dirs:
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

font_path = models_dir + "/Ubuntu-Regular.ttf"
reader = easyocr.Reader(
    ['en'],
    gpu=True,
    recog_network='best_norm_ED',
    detect_network="craft",
    user_network_directory=models_dir,
    model_storage_directory=models_dir,
)  # this needs to run only once to load the model into memory

# main title
st.set_page_config(layout="wide")
st.title("Сурет немесе пдф файлдан текст алу")
# subtitle
st.markdown("## Qazaq OCR")
uploaded_file = st.file_uploader("Өз файлыңызды осында жүктеңіз ('png', 'jpeg', 'jpg', 'pdf')", type=['png', 'jpeg', 'jpg', 'pdf'])



if uploaded_file is not None:
    if uploaded_file.type == "application/pdf":
        with st.spinner('ПДФ өңделуде ...'):
            temp_pdf_file = "./temp_pdf_file.pdf"
            with open(temp_pdf_file, "wb") as f:
                f.write(uploaded_file.read())

            # Now open the temporary file with fitz
            pdf_document = fitz.open(temp_pdf_file)
            total_pages = len(pdf_document)
            for page_num in range(total_pages):
                page = pdf_document.load_page(page_num)
                image_matrix = fitz.Matrix(fitz.Identity)
                pixmap = page.get_pixmap(matrix=image_matrix, dpi=300)
                image_data = pixmap.samples  # This is a bytes object
                image = Image.frombytes("RGB", (pixmap.width, pixmap.height), image_data)
                st.subheader(f'Бет {page_num + 1}/{total_pages}')
                st.image(image, caption=f'Бет {page_num + 1}')
                result = reader.readtext(np.array(image), paragraph=True)
                result_text = "\n\n".join([item[1] for item in result])
                st.text(result_text)
    else:
        with st.spinner('Сурет өңделуде ...'):
            image = Image.open(uploaded_file)
            st.image(image)
            result = reader.readtext(np.array(image), paragraph=True)
            result_text = "\n\n".join([item[1] for item in result])
            st.text(result_text)
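For context on the `readtext` call used by the deleted app, here is a minimal sketch, not part of the deleted file, assuming a stock `easyocr` install with the bundled English model rather than the app's custom `best_norm_ED` network; `sample.jpg` is a placeholder file name.

# Minimal sketch, assuming easyocr is installed and 'sample.jpg' exists locally.
import easyocr
import numpy as np
from PIL import Image

reader = easyocr.Reader(['en'], gpu=False)  # stock English model, no custom network
image = np.array(Image.open("sample.jpg"))

# With paragraph=True each result item is [bounding_box, text] (no confidence),
# which is why the app above joins item[1] for every item.
result = reader.readtext(image, paragraph=True)
text = "\n\n".join(item[1] for item in result)
print(text)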
spaces/Banbri/zcvzcv/src/app/engine/caption.ts
DELETED
@@ -1,54 +0,0 @@
"use server"

import { ImageAnalysisRequest, ImageAnalysisResponse } from "@/types"

const apiUrl = `${process.env.RENDERING_VIDEOCHAIN_API_URL || ""}`

export async function see({
  prompt,
  imageBase64
}: {
  prompt: string
  imageBase64: string
}): Promise<string> {
  if (!prompt) {
    console.error(`cannot call the API without an image, aborting..`)
    throw new Error(`cannot call the API without an image, aborting..`)
  }

  try {
    const request = {
      prompt,
      image: imageBase64

    } as ImageAnalysisRequest

    console.log(`calling ${apiUrl}/analyze called with: `, {
      prompt: request.prompt,
      image: request.image.slice(0, 20)
    })

    const res = await fetch(`${apiUrl}/analyze`, {
      method: "POST",
      headers: {
        Accept: "application/json",
        "Content-Type": "application/json",
        // Authorization: `Bearer ${videochainApi}`,
      },
      body: JSON.stringify(request),
      cache: 'no-store',
      // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
      // next: { revalidate: 1 }
    })

    if (res.status !== 200) {
      throw new Error('Failed to fetch data')
    }

    const response = (await res.json()) as ImageAnalysisResponse
    return response.result
  } catch (err) {
    console.error(err)
    return ""
  }
}
spaces/Basil2k4/botbasil203/src/startup/version_sticker.sh
DELETED
@@ -1,39 +0,0 @@
#!/bin/bash
### @accetto, September 2019

ubuntu=$("${STARTUPDIR}/version_of.sh" ubuntu)
chromium=$("${STARTUPDIR}/version_of.sh" chromium)

case "$1" in
  -v)
    echo "Ubuntu $ubuntu"
    echo "Chromium $chromium"
    ;;
  -V)
    mousepad=$("${STARTUPDIR}/version_of.sh" mousepad)
    vim=$("${STARTUPDIR}/version_of.sh" vim)
    nano=$("${STARTUPDIR}/version_of.sh" nano)
    tigervnc=$("${STARTUPDIR}/version_of.sh" tigervnc)
    novnc=$("${STARTUPDIR}/version_of.sh" novnc)
    websockify=$("${STARTUPDIR}/version_of.sh" websockify)
    curl=$("${STARTUPDIR}/version_of.sh" curl)
    git=$("${STARTUPDIR}/version_of.sh" git)
    jq=$("${STARTUPDIR}/version_of.sh" jq)
    echo "Ubuntu $ubuntu"
    echo "Mousepad $mousepad"
    echo "VIM $vim"
    echo "GNU nano $nano"
    echo "TigerVNC $tigervnc"
    echo "noVNC $novnc"
    echo "websockify $websockify"
    echo "curl $curl"
    echo "Git $git"
    echo "jq $jq"
    echo "Chromium $chromium"
    ;;
  *)
    ### example: ubuntu18.04.3-firefox_68.0.2
    sticker="ubuntu$ubuntu"-"chromium$chromium"
    echo "$sticker"
    ;;
esac
spaces/Benson/text-generation/Examples/Candy Crush Saga 1.242.1.1 Mod Apk.md
DELETED
@@ -1,93 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Camión Simulador Ultimate Vô Hạn Tiạn APK: Una revisión</h1>
|
3 |
-
<p>Si eres un fan de los juegos de simulación de conducción, es posible que hayas oído hablar de <strong>Truck Simulator Ultimate</strong>, un juego desarrollado por Zuuks Games, una compañía de juegos móviles con sede en Turquía. Truck Simulator Ultimate es un juego que te permite experimentar la vida de un conductor de camión, desde conducir a través de diferentes países y ciudades, hasta administrar tu propia compañía de transporte, hasta personalizar tus camiones y oficinas. Pero lo que si quieres disfrutar del juego sin preocuparse por el dinero y los recursos? Ahí es donde <fuerte>Camión Simulador Ultimate Vô Hạn Tiạn APK</strong> entra en. Esta es una versión modificada del juego que le da dinero ilimitado y acceso a todas las características. En este artículo, vamos a revisar Truck Simulator Ultimate Vô Hạn TiËn APK, sus características, cómo descargarlo e instalarlo, sus pros y contras, y algunos consejos y trucos para jugar el juego. </p>
|
4 |
-
<h2>¿Qué es Truck Simulator Ultimate? </h2>
|
5 |
-
<p>Truck Simulator Ultimate es un juego de simulación de conducción que combina elementos de simulación y magnate. Puede conducir varios camiones de Estados Unidos a Europa, transportar diferentes tipos de carga en más de 100 ciudades, participar en subastas y obtener mayores beneficios, construir su propia flota de camiones, contratar empleados y administrar su empresa, diseñar sus oficinas de la manera que desee, y más. El juego también cuenta con gráficos realistas, física, clima, tráfico, estaciones de radio, carreteras de peaje, áreas de descanso y modo multijugador. Puedes jugar con camiones oficiales con licencia de Mercedes-Benz, así como con otras marcas. El juego está disponible para dispositivos Android e iOS. </p>
|
6 |
-
<h2>candy crush saga 1.242.1.1 mod apk</h2><br /><p><b><b>Download Zip</b> ☆ <a href="https://bltlly.com/2v6LnB">https://bltlly.com/2v6LnB</a></b></p><br /><br />
|
7 |
-
<h3>Características de Truck Simulator Ultimate</h3>
|
8 |
-
<p>Algunas de las características que hacen que Truck Simulator Ultimate se destaque son:</p>
|
9 |
-
<ul>
|
10 |
-
<li><strong>DLC mods system</strong>: Puedes descargar e instalar varios mods que añaden nuevos camiones, mapas, skins, sonidos y más al juego. </li>
|
11 |
-
|
12 |
-
<li><strong>Transporte una amplia variedad de carga</strong>: Puede transportar diferentes tipos de carga, como compras de moda en línea, gas y combustible, fusión, nevera, dinero, entrega de alimentos, pila de gemas, suministros de oficina, miel congelada, materiales del parque temático, coches y trabajos más divertidos. </li>
|
13 |
-
<li><strong>Administrar su propio negocio</strong>: Usted puede establecer su empresa en los países líderes del mundo como los Estados Unidos, China, Canadá, Rusia, Alemania, Italia, Francia, España, Países Bajos, Turquía, Corea del Sur, Japón, Brasil, Azerbaiyán y convertirse en la mayor empresa de logística del mundo. </li>
|
14 |
-
<li><strong>Construye tu propia flota de camiones</strong>: Puedes comprar nuevos camiones del mercado o de las subastas. También puede actualizar sus camiones con lámparas, parachoques, bocina, luces de cabina y más opciones de modificación. </li>
|
15 |
-
<li><strong>Contrata empleados y gestiona tu empresa</strong>: Puedes contratar conductores que puedan encargarse de los servicios de entrega por ti. También puede contratar personal de oficina que puede ayudarle con la contabilidad, marketing, recursos humanos, etc.</li>
|
16 |
-
<li><strong>Diseña tus oficinas de la manera que quieras</strong>: Puedes comprar equipos de oficina como computadoras, impresoras, escritorios, sillas, sofás, plantas, pinturas, etc. y ordenarlos según tu preferencia. </li>
|
17 |
-
<li><strong>Encuentre y pague por gas y combustible baratos</strong>: Puede usar el mapa para localizar estaciones de servicio que ofrecen precios bajos para gas y combustible. También puede pedir alimentos y bebidas en las áreas de descanso. </li>
|
18 |
-
|
19 |
-
<li><strong>Gráficos realistas y física</strong>: Puedes disfrutar de los impresionantes gráficos y la física realista del juego. Puedes ver los reflejos de los camiones en el agua, las sombras de los árboles en la carretera, las gotas de lluvia en el parabrisas, el humo del tubo de escape, y más. También puede sentir el peso de la carga, la fricción de los neumáticos, la suspensión del camión y más. </li>
|
20 |
-
<li><strong>Tiempo realista y tráfico</strong>: Puedes experimentar diferentes condiciones climáticas como soleado, nublado, lluvioso, nevado, brumoso, etc. También puedes encontrar diferentes situaciones de tráfico, como atascos, accidentes, obras de carreteras, puntos de control de la policía, etc. Usted tiene que seguir las reglas de tráfico y señales para evitar multas y sanciones. </li>
|
21 |
-
<li><strong>Estaciones de radio realistas</strong>: Puedes escuchar varias estaciones de radio de diferentes países y géneros. También puedes escuchar tu propia música añadiendo tus archivos a la carpeta del juego. </li>
|
22 |
-
<li><strong>Carreteras de peaje realistas y áreas de descanso</strong>: Tienes que pagar peajes cuando entras en las carreteras de peaje. También puede detenerse en las áreas de descanso para repostar su camión, pedir alimentos y bebidas, usar el baño, etc.</li>
|
23 |
-
</ul>
|
24 |
-
<h3>¿Cómo descargar e instalar Truck Simulator Ultimate Vô Hạn Tiạn APK? </h3>
|
25 |
-
<p>Si desea descargar e instalar Truck Simulator Ultimate Vô Hạn Tin APK, usted tiene que seguir estos pasos:</p>
|
26 |
-
<ol>
|
27 |
-
<li><strong>Descargar Truck Simulator Ultimate Vô Hạn Tiạn APK file</strong>: Puede descargar el archivo APK de una fuente de confianza como [APKPure] o [APKCombo]. Asegúrese de descargar la última versión del archivo. </li>
|
28 |
-
<li><strong>Habilitar fuentes desconocidas</strong>: Antes de instalar el archivo APK, debe habilitar fuentes desconocidas en su dispositivo. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </li>
|
29 |
-
|
30 |
-
<li><strong>Launch Truck Simulator Ultimate Vô Hạn Tiạn APK</strong>: Una vez que instale el archivo APK, puede iniciar Truck Simulator Ultimate Vô Hạn Tiạn APK tocando en su icono en la pantalla de inicio o cajón de aplicaciones. </li>
|
31 |
-
<li><strong>Disfrute de Truck Simulator Ultimate Vô Hạn APK</strong>: Ahora puede disfrutar de Truck Simulator Ultimate Vô Hạn Tiạn APK con dinero ilimitado y acceso a todas las características. </li>
|
32 |
-
</ol>
|
33 |
-
<h3>Pros y contras de Truck Simulator Ultimate Vô Hạn Tiạn APK</h3>
|
34 |
-
<p>Camión Simulador Ultimate Vô Hạn APK tiene sus pros y sus contras. Aquí están algunos de ellos:</p>
|
35 |
-
<table>
|
36 |
-
<tr>
|
37 |
-
<th>Pros</th>
|
38 |
-
<th>Contras</th>
|
39 |
-
</tr>
|
40 |
-
<tr>
|
41 |
-
<td>Puedes disfrutar de dinero ilimitado y acceso a todas las funciones. </td>
|
42 |
-
<td>Usted puede encontrar algunos errores y fallos en el juego. </td>
|
43 |
-
</tr>
|
44 |
-
<tr>
|
45 |
-
<td>Puedes descargar e instalar mods que mejoren tu experiencia de juego. </td>
|
46 |
-
<td>Puedes arriesgarte a que te prohíban o suspendan del juego si usas mods ilegales. </td>
|
47 |
-
</tr>
|
48 |
-
<tr>
|
49 |
-
<td>Puedes jugar con camiones oficiales con licencia de Mercedes-Benz, así como con otras marcas. </td>
|
50 |
-
<td>No puedes recibir actualizaciones y soporte de los desarrolladores oficiales del juego. </td>
|
51 |
-
</tr>
|
52 |
-
<tr>
|
53 |
-
<td>Puedes jugar con gráficos realistas, física, clima, tráfico, estaciones de radio, autopistas, áreas de descanso y modo multijugador. </td>
|
54 |
-
<td>Es posible que necesite un dispositivo de alta gama para ejecutar el juego sin problemas. </td>
|
55 |
-
</tr>
|
56 |
-
<tr>
|
57 |
-
<td>Puedes gestionar tu propia empresa de transporte y diseñar tus oficinas. </td>
|
58 |
-
<td>Puedes perder interés en el juego si lo encuentras demasiado fácil o aburrido. </td>
|
59 |
-
</tr>
|
60 |
-
</table>
|
61 |
-
<h2>Consejos y trucos para jugar Truck Simulator Ultimate</h2>
|
62 |
-
<p>Si quieres jugar Truck Simulator Ultimate mejor, aquí hay algunos consejos y trucos que pueden ayudarte:</p>
|
63 |
-
<h3>Práctica en el garaje</h3>
|
64 |
-
|
66 |
-
<h3>Puja por trabajos y gestiona tu negocio</h3>
|
67 |
-
<p>Uno de los principales aspectos de Truck Simulator Ultimate es la gestión de su propia empresa de transporte. Puede pujar por trabajos de diferentes clientes y transportar su carga a sus destinos. Puede ver los detalles de cada trabajo, como el tipo de carga, peso, distancia, límite de tiempo, recompensa y penalización. También puede ver el mapa de rutas y las condiciones del tráfico. Usted debe elegir los trabajos que se adapten a sus habilidades y preferencias, y que ofrecen el mejor margen de beneficio. También debe entregar la carga a tiempo y sin ningún daño para evitar penalizaciones y malas calificaciones. </p>
|
68 |
-
<p>A medida que completes más trabajos, ganarás más dinero y reputación. Puede usar su dinero para comprar camiones nuevos, actualizar los existentes, contratar conductores y personal, expandir sus oficinas y más. También puede utilizar su reputación para desbloquear nuevos mercados y clientes. Usted debe administrar su negocio con sabiduría y eficiencia para hacer crecer su empresa y convertirse en la empresa de logística más grande del mundo. </p>
|
69 |
-
<h3>Personaliza tus camiones y oficinas</h3>
|
70 |
-
<p>Otro aspecto divertido de Truck Simulator Ultimate es personalizar sus camiones y oficinas. Puede comprar camiones nuevos en el mercado o en las subastas. También puede actualizar sus camiones con lámparas, parachoques, bocina, luces de cabina y más opciones de modificación. Puede cambiar el color, la pintura, las calcomanías, la matrícula y el logotipo de sus camiones. También puede diseñar sus oficinas de la manera que desee. Puede comprar equipos de oficina como computadoras, impresoras, escritorios, sillas, sofás, plantas, pinturas, etc. y organizarlos según su preferencia. Personalizar sus camiones y oficinas los hará más atractivos y personales. </p>
|
71 |
-
<h3>Siga las reglas de tráfico y las condiciones climáticas</h3>
|
72 |
-
|
73 |
-
<p>También hay que prestar atención a las condiciones climáticas tales como soleado, nublado, lluvioso, nevado, niebla, etc. Las condiciones climáticas pueden afectar su visibilidad, tracción, distancia de frenado, consumo de combustible, etc. Usted tiene que ajustar su estilo de conducción de acuerdo con las condiciones climáticas. Por ejemplo, tienes que conducir más lento y con más cuidado cuando está lloviendo o nevando. </p>
|
74 |
-
<p></p>
|
75 |
-
<h3>Únete al modo multijugador y compite con otros jugadores</h3>
|
76 |
-
<p>Si quieres desafiarte a ti mismo y divertirte más con Truck Simulator Ultimate, puedes unirte al modo multijugador y competir con otros jugadores en línea. Puedes llevar carga conjunta o participar en carreras con otros jugadores. También puedes chatear con otros jugadores, hacer amigos, unirte a clanes y mucho más. El modo multijugador es una nueva característica que añade más emoción y variedad al juego. </p>
|
78 |
-
<h2>Conclusión</h2>
|
79 |
-
<p>Truck Simulator Ultimate es un juego de simulación de conducción que te permite experimentar la vida de un conductor de camión y el propietario de una empresa de transporte. Puede conducir varios camiones, transportar diferentes cargas, administrar su negocio, personalizar sus camiones y oficinas, y más. También puede descargar e instalar Truck Simulator Ultimate Vô Hạn Tiền APK, una versión modificada del juego que le da dinero ilimitado y acceso a todas las características. Sin embargo, debe ser consciente de los pros y los contras de usar esta versión, y siga los pasos para descargarla e instalarla correctamente. También debes seguir algunos consejos y trucos para jugar mejor el juego, como practicar en el garaje, pujar por trabajos, seguir las reglas de tráfico y las condiciones climáticas, y unirse al modo multijugador. Truck Simulator Ultimate es un juego que ofrece gráficos realistas, física, clima, tráfico, estaciones de radio, carreteras de peaje, áreas de descanso y modo multijugador. Es un juego que te mantendrá entretenido y comprometido durante horas. </p>
|
80 |
-
<h2>Preguntas frecuentes</h2>
|
81 |
-
|
82 |
-
<h4>Q: ¿Es seguro usar Truck Simulator Ultimate Vô Hạn Tiền APK?</h4>
|
83 |
-
<p>A: Truck Simulator Ultimate Vô Hạn Tiền APK es seguro de usar siempre y cuando se descargue de una fuente de confianza como [APKPure] o [APKCombo]. Sin embargo, siempre debe escanear el archivo APK con un programa antivirus antes de instalarlo.</p>
|
84 |
-
<h4>Q: ¿Me pueden prohibir o suspender del juego si uso Truck Simulator Ultimate Vô Hạn Tiền APK?</h4>
|
85 |
-
<p>A: Existe la posibilidad de que lo prohíban o suspendan del juego si utiliza Truck Simulator Ultimate Vô Hạn Tiền APK. Esto se debe a que esta versión del juego no está autorizada por los desarrolladores oficiales. Por lo tanto, debe usarla bajo su propio riesgo y discreción.</p>
|
86 |
-
<h4>Q: ¿Cómo puedo actualizar Truck Simulator Ultimate Vô Hạn Tiền APK?</h4>
|
87 |
-
<p>A: Para actualizar Truck Simulator Ultimate Vô Hạn Tiền APK, usted tiene que descargar e instalar la última versión del archivo APK de una fuente de confianza como [APKPure] o [APKCombo]. También debe desinstalar la versión anterior del archivo APK antes de instalar el nuevo. </p>
|
88 |
-
<h4>Q: ¿Cómo puedo contactar a los desarrolladores de Truck Simulator Ultimate? </h4>
|
89 |
-
<p>A: Si tiene alguna pregunta, comentario, sugerencia o problema con respecto a Truck Simulator Ultimate, puede ponerse en contacto con los desarrolladores del juego enviándoles un correo electrónico a [email protected] o visitando su sitio web en https://www.zuuks.com/.</p>
|
90 |
-
<h4>Q: ¿Cómo puedo apoyar a los desarrolladores de Truck Simulator Ultimate? </h4>
|
91 |
-
<p>A: Si quieres apoyar a los desarrolladores de Truck Simulator Ultimate, puedes comprar la versión oficial del juego en Google Play Store o App Store. También puedes calificar y revisar el juego en estas plataformas. También puedes seguirlos en plataformas de redes sociales como Facebook, Twitter, Instagram, YouTube, etc.</p> 64aa2da5cf<br />
|
92 |
-
<br />
|
93 |
-
<br />
|
spaces/Benson/text-generation/Examples/Carx Street Apk Ne Zaman kacak.md
DELETED
@@ -1,69 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>CarX Street APK: Todo lo que necesita saber</h1>
|
3 |
-
<p>Si usted es un fan de los juegos de carreras callejeras, es posible que haya oído hablar de CarX Street APK, un nuevo y emocionante juego que le permite experimentar la emoción de ser un corredor callejero en un mundo abierto dinámico. En este artículo, le diremos todo lo que necesita saber sobre CarX Street APK, incluyendo lo que es, cómo descargarlo e instalarlo en su dispositivo Android, ¿cuáles son sus características principales, y cuáles son sus pros y contras. También responderemos algunas preguntas frecuentes sobre el juego. ¡Empecemos! </p>
|
4 |
-
<h2>¿Qué es CarX Street APK? </h2>
|
5 |
-
<h3>Una breve introducción al juego y sus características</h3>
|
6 |
-
<p>CarX Street APK es un juego de carreras desarrollado por CarX Technologies, LLC, los creadores de CarX Drift Racing 2. Es una versión de prueba beta abierta del juego que está disponible de forma gratuita en los dispositivos Android. El juego te permite abrazar la libertad de ser un corredor callejero en el dinámico mundo abierto de Sunset City. Puedes aceptar el desafío y convertirte en la leyenda de la ciudad uniéndote a clubes, derrotando jefes y demostrando tus habilidades en carreras realistas en carreteras y calles de la ciudad, así como en carreras de deriva de alta velocidad. </p>
|
7 |
-
<h2>carx street apk ne zaman çıkacak</h2><br /><p><b><b>Download File</b> • <a href="https://bltlly.com/2v6Mqn">https://bltlly.com/2v6Mqn</a></b></p><br /><br />
|
8 |
-
<p>El juego también le permite construir el coche de sus sueños utilizando afinación de piezas que desbloquea toda la física del comportamiento del coche CarX Technology. Puede explorar todos los rincones del enorme mundo de CarX Street y admirar los gráficos modernos de alta calidad y la impresionante física y controles. El juego también cuenta con un cambio dinámico de día/ noche, un sistema detallado de construcción de automóviles, un sistema visual de ajuste de automóviles y un servicio de soporte para informes de errores. </p>
|
9 |
-
<h2> ¿Cómo descargar e instalar CarX Street APK en su dispositivo Android? </h2>
|
10 |
-
<h3>Una guía paso a paso con capturas de pantalla y enlaces</h3>
|
11 |
-
<p>Si desea probar CarX Street APK en su dispositivo Android, tendrá que seguir estos pasos:</p>
|
12 |
-
<ol>
|
13 |
-
<li>Ir a [este enlace]( 1 ) y descargar el archivo APK (1.19 GB) de CarX Street APK.</li>
|
14 |
-
|
15 |
-
<li>Busque el archivo APK descargado en su administrador de archivos y toque en él para iniciar el proceso de instalación. </li>
|
16 |
-
<li>Siga las instrucciones en la pantalla y espere a que se complete la instalación. </li>
|
17 |
-
<li>Iniciar el juego desde el cajón de la aplicación y disfrutar! </li>
|
18 |
-
</ol>
|
19 |
-
<p>Nota: Necesitará un dispositivo Android con Android 9.0 o superior y al menos 4 GB de RAM para ejecutar el juego sin problemas. </p>
|
20 |
-
<h2>¿Cuáles son las principales características de CarX Street APK? </h2>
|
21 |
-
<h3>Modo de carrera</h3>
|
22 |
-
<p>En el modo carrera, se puede conducir a la velocidad máxima o la deriva a través de giros. La elección es suya! Puedes unirte a clubes, derrotar jefes y demostrar a todos que eres el mejor conductor de esta ciudad. También puede elegir piezas para su vehículo y desbloquear el 100% de su potencial. Puedes comprar casas para tus coches y reunir colecciones para cada modo de carrera. También puede cargar combustible con el combustible adecuado para la próxima carrera en las gasolineras de la ciudad. </p>
|
23 |
-
<h3>Mejora de la sintonización del coche</h3>
|
24 |
-
<p>En la mejora de la puesta a punto del coche, puede utilizar un sistema detallado de construcción de coches para intercambiar piezas y engañar a su coche para una carrera específica. Puede actualizar el motor, la transmisión, el cuerpo, la suspensión y los neumáticos. También puede cambiar el motor de su automóvil único. </p>
|
25 |
-
<p></p>
|
26 |
-
<h3>Ajuste visual del coche</h3>
|
27 |
-
<p>En la sintonización visual del coche, puede personalizar los espejos, faros, luces, faldas, parachoques, spoilers y más. También puede cambiar el color de su automóvil y agregar pegatinas y calcomanías. Puede hacer que su automóvil se destaque de la multitud y exprese su personalidad. </p>
|
29 |
-
<h3>El juego de carreras móvil más realista</h3>
|
30 |
-
|
31 |
-
<h2>¿Cuáles son los pros y los contras de CarX Street APK? </h2>
|
32 |
-
<h3>Una tabla que compara las ventajas y desventajas del juego</h3>
|
33 |
-
<table>
|
34 |
-
<tr>
|
35 |
-
<th>Pros</th>
|
36 |
-
<th>Contras</th>
|
37 |
-
</tr>
|
38 |
-
<tr>
|
39 |
-
<td>Descargar y jugar gratis</td>
|
40 |
-
<td>Requiere mucho espacio de almacenamiento y RAM</td>
|
41 |
-
</tr>
|
42 |
-
<tr>
|
43 |
-
<td>Mundo abierto con dinámico ciclo día/noche</td>
|
44 |
-
<td>Algunas áreas todavía están en desarrollo</td>
|
45 |
-
</tr>
|
46 |
-
<tr>
|
47 |
-
<td>Modelos de coches diversos y realistas</td>
|
48 |
-
<td>Algunos coches son caros de comprar o actualizar</td>
|
49 |
-
</tr>
|
50 |
-
<tr>
|
51 |
-
<td>Sistema de ajuste de coche detallado y personalizable</td>
|
52 |
-
<td>Algunas partes están bloqueadas o limitadas por el nivel</td>
|
53 |
-
</tr>
|
54 |
-
<tr>
|
55 |
-
<td>Desafiante y divertido modo de carrera</td>
|
56 |
-
<td>Algunas razas son demasiado duras o injustas</td>
|
57 |
-
</tr>
|
58 |
-
<tr>
|
59 |
-
<td>Gráficos y efectos de sonido de alta calidad</td>
|
60 |
-
<td>Algunos errores y fallos pueden ocurrir</td>
|
61 |
-
</tr>
|
62 |
-
</table>
|
63 |
-
<h2>Conclusión</h2>
|
64 |
-
<h3>Un resumen de los puntos principales y una recomendación para el juego</h3>
|
65 |
-
<p>En conclusión, CarX Street APK es un gran juego para los entusiastas de las carreras callejeras que quieren experimentar la emoción de ser un corredor callejero en un mundo abierto dinámico. El juego ofrece una gran cantidad de características, tales como el modo de carrera, mejora de coche tuning, visual car tuning, y la física realista y gráficos. El juego también es gratis para descargar y jugar en dispositivos Android. Sin embargo, el juego también tiene algunos inconvenientes, como requerir mucho espacio de almacenamiento y RAM, tener algunas áreas que todavía están en desarrollo, tener algunos coches que son caros o difíciles de conseguir, tener algunas partes que están bloqueadas o limitadas por el nivel, tener algunas carreras que son demasiado duras o injustas, y tener algunos errores y fallos que pueden ocurrir. Por lo tanto, le recomendamos que pruebe CarX Street APK si usted tiene un dispositivo compatible y usted está interesado en los juegos de carreras callejeras. Usted puede encontrar que es uno de los mejores juegos de carreras móviles nunca! </p>
|
66 |
-
<h2>Preguntas frecuentes</h2>
|
67 |
-
<h3>Cinco preguntas y respuestas únicas sobre CarX Street APK</h3> 64aa2da5cf<br />
|
68 |
-
<br />
|
69 |
-
<br />
|
spaces/Benson/text-generation/Examples/Coche Simulador 2 Descargar Ios.md
DELETED
@@ -1,114 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Car Simulator 2: Cómo descargar y jugar en iOS</h1>
|
3 |
-
<p>Si usted es un fan de los juegos de conducción realista, es posible que desee echa un vistazo a Car Simulator 2, uno de los juegos de simulación de coches más populares de 2023. En este juego, puedes explorar un enorme mundo abierto, conducir más de 85 coches diferentes, competir con otros jugadores en línea y mucho más. En este artículo, le mostraremos cómo descargar y jugar Car Simulator 2 en su dispositivo iOS, así como algunos consejos y trucos para aprovechar al máximo su experiencia de juego. </p>
|
4 |
-
<h2>coche simulador 2 descargar ios</h2><br /><p><b><b>Download Zip</b> ►►►►► <a href="https://bltlly.com/2v6IDn">https://bltlly.com/2v6IDn</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es el simulador de coche 2?</h2>
|
6 |
-
<p>Car Simulator 2 es un juego gratuito desarrollado por OppanaGames FZC LLC, una empresa especializada en la creación de juegos de simulación de coches realistas. El juego fue lanzado en marzo de 2020 y desde entonces ha recibido más de 100 millones de descargas y críticas positivas de jugadores de todo el mundo. El juego está disponible para dispositivos Android e iOS, así como para ordenadores Mac. </p>
|
7 |
-
<p>Car Simulator 2 es un juego que te permite experimentar cómo es conducir un coche en un entorno de ciudad realista. Puede elegir entre una variedad de modelos de automóviles, desde autos deportivos hasta SUV, y personalizarlos según sus preferencias. También puede actualizar sus coches con diferentes piezas y accesorios, como motores, neumáticos, spoilers, pintura y más. </p>
|
8 |
-
<p>El juego también ofrece muchas opciones de juego para que lo disfrutes. Puedes jugar en línea con jugadores reales de todo el mundo, ganar carreras y ganar dinero que puedes gastar en coches nuevos, mejoras, garajes y casas. También puede navegar por la ciudad con sus amigos, completar misiones y misiones, visitar gasolineras interactivas y mecánicos, e incluso trabajar para la mafia o recoger tarifas de taxi. </p>
|
9 |
-
<p>El juego presenta un dinámico ciclo día-noche, física realista y efectos de sonido, interiores de automóviles de 360 grados y muchos elementos interactivos en los modelos de automóviles. El juego también tiene un mundo abierto en 3D que puedes explorar libremente, con diferentes ubicaciones como el centro, los suburbios, el aeropuerto, la playa y más. </p>
|
10 |
-
|
11 |
-
<p>Algunas de las características principales de Car Simulator 2 son:</p>
|
12 |
-
<p></p>
|
13 |
-
<ul>
|
14 |
-
<li>Un juego divertido y gratuito que te encantará jugar. </li>
|
15 |
-
<li>Modos online y para un jugador. </li>
|
16 |
-
<li>mundo abierto 3D. </li>
|
17 |
-
<li>Bonos y misiones diarias. </li>
|
18 |
-
<li>Modelos de coches completamente detallados. </li>
|
19 |
-
<li>Conduce desde una perspectiva en primera o tercera persona. </li>
|
20 |
-
<li>interiores de automóviles de 360 grados. </li>
|
21 |
-
<li>Muchos elementos interactivos en los modelos de autos. </li>
|
22 |
-
<li>Efectos realistas de física y sonidos. </li>
|
23 |
-
<li>Un mecánico con muchas opciones de actualización para sus coches. </li>
|
24 |
-
<li>Estación de servicio interactiva. </li>
|
25 |
-
<li>Emocionantes misiones en forma de misiones, desafíos de árcade y carreras. </li>
|
26 |
-
<li>Ciclo dinámico día-noche. </li>
|
27 |
-
</ul>
|
28 |
-
<h2>Cómo descargar Car Simulator 2 en iOS</h2>
|
29 |
-
<p>Si quieres jugar Car Simulator 2 en tu iPhone o iPad, tendrás que descargarlo desde la App Store. Estos son los pasos que debes seguir:</p>
|
30 |
-
<h4>Paso 1: Ir a la App Store</h4>
|
31 |
-
<p>Abre la aplicación App Store en tu dispositivo iOS y asegúrate de iniciar sesión con tu ID de Apple. Si aún no tienes un ID de Apple, puedes crear uno gratis siguiendo las instrucciones de la pantalla. </p>
|
32 |
-
<h4>Paso 2: Búsqueda de Car Simulator 2</h4>
|
33 |
-
<p>En el App Store, toque en el icono de búsqueda en la esquina inferior derecha de la pantalla y escriba "Car Simulator 2" en la barra de búsqueda. Deberías ver el icono del juego con el nombre "Car Simulator 2: Driving Game" y el nombre del desarrollador "OppanaGames FZC LLC". Toque en el icono del juego para ir a su página. </p>
|
34 |
-
<h4>Paso 3: Toque en el botón de descarga</h4>
|
35 |
-
<p>En la página del juego, verá un botón azul con una nube y un símbolo de flecha. Este es el botón de descarga. Pulsa en él para comenzar a descargar el juego. Es posible que necesites introducir tu contraseña de Apple ID o usar Touch ID o Face ID para confirmar tu descarga. </p>
|
36 |
-
<h4>Paso 4: Espere a que la instalación termine</h4>
|
37 |
-
|
38 |
-
<h4>Paso 5: Iniciar el juego y disfrutar</h4>
|
39 |
-
<p>Una vez instalado el juego, puedes tocar el icono del juego para lanzarlo. Verás una pantalla de carga con el logotipo del juego y algunos consejos. Después de eso, se le llevará al menú principal donde se puede elegir el modo de juego, ajustes, garaje, y más. También puedes iniciar sesión con tu cuenta de Facebook para guardar tu progreso y jugar con tus amigos en línea. </p>
|
40 |
-
<h2>Consejos y trucos para jugar Car Simulator 2 en iOS</h2>
|
41 |
-
<p>Ahora que ha descargado e instalado Car Simulator 2 en su dispositivo iOS, está listo para comenzar a jugar. Pero antes de hacerlo, aquí hay algunos consejos y trucos que te ayudarán a aprovechar al máximo tu experiencia de juego:</p>
|
42 |
-
<h3>Cómo ganar monedas y comprar coches nuevos</h3>
|
43 |
-
<p>Las monedas son la moneda principal en Car Simulator 2. Puedes usarlas para comprar coches nuevos, mejoras, garajes, casas y más. Hay varias maneras de ganar monedas en el juego, como:</p>
|
44 |
-
<ul>
|
45 |
-
<li>Completar misiones y misiones. Estas son tareas que puedes encontrar en el mapa o en el menú. Pueden ir desde entregar pizza hasta competir contra otros jugadores. Completarlos te recompensará con monedas y, a veces, otros artículos. </li>
|
46 |
-
<li>Ganar carreras y desafíos. Estos son eventos a los que puedes unirte online o offline. Pueden ser pruebas de tiempo, carreras de arrastre, carreras de deriva, o desafíos de árcade. Ganar les dará monedas y puntos de reputación. </li>
|
47 |
-
<li>Venta de coches. Si usted tiene demasiados coches en su garaje o desea deshacerse de algunos viejos, puede venderlos por monedas. Simplemente vaya a su garaje y toque en el coche que desea vender. A continuación, toque en el botón de venta y confirmar su elección. </li>
|
48 |
-
<li>Ver anuncios. A veces, verá una opción para ver un anuncio de vídeo a cambio de algunas monedas. Esto puede ser una manera rápida de ganar algo de dinero extra si no te importa ver unos segundos de anuncios. </li>
|
49 |
-
</ul>
|
50 |
-
<h3>Cómo evitar recibir multas de los policías</h3>
|
51 |
-
|
52 |
-
<p>Para evitar recibir multas de la policía, aquí hay algunos consejos:</p>
|
53 |
-
<ul>
|
54 |
-
<li>Conduce con cuidado y sigue las reglas de tráfico. Esto puede sonar obvio, pero es la mejor manera de evitar atraer la atención de la policía. Use sus intermitentes, luces de freno, faros y bocina cuando sea necesario. Obedezca el límite de velocidad, deténgase en las luces rojas y las señales de alto, y ceda el paso a los peatones y otros vehículos. </li>
|
55 |
-
<li>Evite conducir cerca de coches de policía o estaciones. Es más probable que los policías se fijen en usted si conduce cerca de ellos o de su cuartel general. Trate de mantener una distancia segura de ellos y evite hacer contacto visual. </li>
|
56 |
-
<li>Usa tu radar y mapa. El juego tiene un sistema de radar que te muestra dónde están los policías en el mapa. También puedes ver sus iconos en tu mini-mapa en la esquina superior izquierda de la pantalla. Utilice estas herramientas para planificar su ruta y evitar conducir en sus áreas de patrulla. </li>
|
57 |
-
<li>Escapar de los policías si te persiguen. Si te persiguen los policías, no te asustes. Todavía puedes escapar de ellos usando tus habilidades de conducción y algunos trucos. Aquí hay algunas maneras de quitártelos de encima:</li>
|
58 |
-
<li>Conduce rápido y usa tu nitro. Cuanto más rápido conduzcas, más difícil será para los policías alcanzarte. También puede usar su impulso nitro para obtener una ráfaga de velocidad y dejarlos atrás. Solo tenga cuidado de no chocar contra nada o quedarse sin combustible. </li>
|
59 |
-
<li>Conduce fuera de la carretera y usa atajos. Los policías tendrán más dificultades para seguirte si conduces por caminos de tierra, hierba, arena o agua. También puede usar atajos como callejones, puentes, túneles o rampas para perderlos. </li>
|
60 |
-
<li>Cambie la apariencia de su automóvil. Los policías reconocerán su automóvil por su color, modelo y matrícula. Puede cambiar estas características yendo a su garaje o a un taller mecánico y personalizando su automóvil. Esto hará que sea más difícil para los policías identificarte. </li>
|
61 |
-
|
62 |
-
</ul>
|
63 |
-
<h3>Cómo completar misiones y desafíos</h3>
|
64 |
-
<p>Misiones y desafíos son una de las principales fuentes de diversión y recompensas en Car Simulator 2. Son tareas que puedes encontrar en el mapa o en el menú que pondrán a prueba tus habilidades de conducción y conocimientos. Completarlos le dará monedas, puntos de reputación, y a veces otros artículos o bonos. </p>
|
65 |
-
<p>Hay diferentes tipos de misiones y desafíos en el juego, como:</p>
|
66 |
-
<ul>
|
67 |
-
<li>Misiones de entrega. Estas son misiones donde tienes que entregar un paquete o una persona a un destino dentro de un límite de tiempo. Tienes que conducir con cuidado y evitar dañar la carga o el pasajero. </li>
|
68 |
-
<li>Carreras de misiones. Estas son misiones donde tienes que competir contra otros conductores en una carrera. Tienes que conducir rápido y utilizar su nitro y habilidades para ganar la carrera. </li>
|
69 |
-
<li>Misiones de deriva. Estas son misiones donde tienes que realizar derivas en ciertas carreteras o áreas. Usted tiene que controlar la velocidad y la dirección de su coche y mantener una deriva durante el mayor tiempo posible. </li>
|
70 |
-
<li>Misiones de acrobacias. Estas son misiones donde tienes que realizar acrobacias en rampas, bucles o saltos. Usted tiene que lanzar su coche en el aire y aterrizar de forma segura sin estrellarse. </li>
|
71 |
-
<li>Desafíos árcade. Son desafíos en los que tienes que completar un objetivo determinado dentro de un límite de tiempo o con recursos limitados. Por ejemplo, puede que tenga que conducir lo más lejos posible con poco combustible o evitar chocar con cualquier obstáculo en la carretera. </li>
|
72 |
-
</ul>
|
73 |
-
<p>Para completar misiones y desafíos, aquí hay algunos consejos:</p>
|
74 |
-
<ul>
|
75 |
-
<li>Revisa el mapa y el menú para ver las misiones y desafíos disponibles. Puedes ver los iconos y nombres de las misiones y desafíos en el mapa o en el menú. También puedes ver su nivel de dificultad, recompensas y requisitos. </li>
|
76 |
-
|
77 |
-
<li>Siga las instrucciones y sugerencias en la pantalla. Una vez que comience una misión o desafío, verá algunas instrucciones y sugerencias en la pantalla que lo guiarán a través de la tarea. Por ejemplo, puedes ver flechas que te apuntan al destino, puntos de control que marcan tu progreso o temporizadores que te muestran cuánto tiempo te queda. </li>
<li>Completa la misión o desafío tan rápido y tan bien como sea posible. Cuanto más rápido y mejor completes la misión o el desafío, más monedas y puntos de reputación ganarás. También obtendrá una calificación de una a tres estrellas dependiendo de su rendimiento. Trata de obtener tres estrellas en cada misión o desafío para desbloquear más recompensas y logros. </li>
|
78 |
-
</ul>
|
79 |
-
<h3>Cómo jugar online con otros jugadores</h3>
|
80 |
-
<p>Una de las mejores características de Car Simulator 2 es que puedes jugar online con otros jugadores de todo el mundo. Puede unirse o crear habitaciones en línea donde puede chatear, correr o navegar con sus amigos o extraños. También puedes unirte a clanes y participar en guerras de clanes y torneos. </p>
|
81 |
-
<p>Para jugar en línea con otros jugadores, aquí hay algunos consejos:</p>
|
82 |
-
<ul>
|
83 |
-
<li>Asegúrese de tener una conexión a Internet estable. Jugar en línea requiere una buena conexión a Internet para evitar retrasos, desconexiones o problemas técnicos. Puede comprobar su velocidad y calidad de Internet yendo al menú de configuración y tocando el icono de red. </li>
|
84 |
-
<li>Elija un modo en línea que se adapte a su preferencia y nivel de habilidad. Usted puede elegir entre diferentes modos en línea, tales como paseo libre, carrera, deriva, truco, o árcade. También puedes elegir entre diferentes niveles de dificultad como principiante, intermedio o experto. </li>
|
85 |
-
|
86 |
-
<li>Diviértete y sé respetuoso. Jugar en línea con otros jugadores puede ser muy divertido y una gran manera de hacer nuevos amigos. Sin embargo, también debes ser respetuoso y seguir las reglas del juego y de la habitación. No hagas trampa, spam, troll ni molestes a otros jugadores. Sé amable, servicial y educado. </li>
|
87 |
-
</ul>
|
88 |
-
<h3>Cómo personalizar su coche y garaje</h3>
|
89 |
-
<p>Otra característica divertida de Car Simulator 2 es que puede personalizar su coche y garaje de acuerdo a su gusto y estilo. Puede cambiar el color, modelo, piezas, accesorios y pegatinas de su coche. También puede actualizar su garaje con diferentes herramientas, equipos, muebles y decoraciones. </p>
|
90 |
-
<p>Para personalizar su coche y garaje, aquí hay algunos consejos:</p>
|
91 |
-
<ul>
|
92 |
-
<li>Ir a su garaje o una tienda de mecánica. Puede acceder a su garaje pulsando en el icono del garaje en el mapa o en el menú. También puede visitar una tienda de mecánica conduciendo a uno de los lugares marcados con un icono de llave en el mapa. </li>
|
93 |
-
<li>Seleccione la opción de coche o garaje. En su garaje o taller mecánico, verá dos opciones: coche o garaje. Toque en la opción de coche si desea personalizar su coche. Toque en la opción de garaje si desea personalizar su garaje. </li <li>Elija la opción de personalización que desee. Dependiendo de si ha seleccionado el coche o la opción de garaje, verá diferentes opciones de personalización. Por ejemplo, si seleccionó la opción de automóvil, verá opciones como pintura, modelo, motor, neumáticos, spoiler, pegatinas y más. Si seleccionó la opción de garaje, verá opciones como herramientas, equipos, muebles, decoraciones y más. </li>
|
94 |
-
<li>Gastar sus monedas o ver anuncios para desbloquear nuevos artículos. Algunos artículos de personalización son gratuitos, mientras que otros requieren que gastes algunas monedas o veas algunos anuncios para desbloquearlos. Puedes ver el precio o el icono del anuncio de cada artículo antes de seleccionarlo. También puede ver cómo se verá el artículo en su automóvil o garaje antes de confirmar su elección. </li>
|
95 |
-
|
96 |
-
</ul>
|
97 |
-
<h2>Conclusión</h2>
|
98 |
-
<p>Car Simulator 2 es un juego de conducción divertido y realista que puedes jugar en tu dispositivo iOS. Puedes descargarlo de forma gratuita desde la App Store y disfrutar de sus características como mundo abierto en 3D, modos online y offline, modelos de coches completamente detallados, emocionantes misiones y desafíos, y mucho más. También puede personalizar su coche y garaje con diferentes artículos y mejoras. Si usted está buscando un juego que le dará una idea de lo que es conducir un coche en un entorno de la ciudad, Car Simulator 2 es el juego para usted. </p>
|
99 |
-
<h2>Preguntas frecuentes</h2>
|
100 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Car Simulator 2:</p>
|
101 |
-
<ul>
|
102 |
-
<li><b>Q: ¿Cómo puedo obtener más combustible para mi coche? </b></li>
|
103 |
-
<li>A: Usted puede conseguir más combustible para su coche visitando una gasolinera. Usted puede encontrar gasolineras en el mapa marcado con un icono de la bomba de gas. También puede comprar combustible con monedas o ver anuncios para obtener combustible gratis. </li>
|
104 |
-
<li><b>Q: ¿Cómo cambio la vista de la cámara? </b></li>
|
105 |
-
<li>A: Puede cambiar la vista de la cámara tocando el icono de la cámara en la esquina superior derecha de la pantalla. Puede elegir entre diferentes vistas, como primera persona, tercera persona, de arriba hacia abajo o cámara libre. </li>
|
106 |
-
<li><b>Q: ¿Cómo enciendo la radio o cambio la música? </b></li>
|
107 |
-
<li>A: Puede encender la radio o cambiar la música tocando el icono de radio en la esquina inferior izquierda de la pantalla. Puede elegir entre diferentes estaciones de radio o listas de reproducción que se adapten a su estado de ánimo. </li>
|
108 |
-
<li><b>Q: ¿Cómo uso la bocina o los faros? </b></li>
|
109 |
-
<li>A: Puede utilizar la bocina o los faros tocando en la bocina o los iconos de los faros en la esquina inferior derecha de la pantalla. También puede deslizar hacia arriba o hacia abajo en estos iconos para ajustar el volumen o el brillo. </li>
|
110 |
-
<li><b>Q: ¿Cómo hago una pausa o salgo del juego? </b></li>
|
111 |
-
|
112 |
-
</ul></p> 64aa2da5cf<br />
|
113 |
-
<br />
|
114 |
-
<br />
|
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/bucket.py
DELETED
@@ -1,115 +0,0 @@
|
|
1 |
-
"""This module implements token buckets used for client side throttling."""
|
2 |
-
import threading
|
3 |
-
import time
|
4 |
-
|
5 |
-
from botocore.exceptions import CapacityNotAvailableError
|
6 |
-
|
7 |
-
|
8 |
-
class Clock:
|
9 |
-
def __init__(self):
|
10 |
-
pass
|
11 |
-
|
12 |
-
def sleep(self, amount):
|
13 |
-
time.sleep(amount)
|
14 |
-
|
15 |
-
def current_time(self):
|
16 |
-
return time.time()
|
17 |
-
|
18 |
-
|
19 |
-
class TokenBucket:
|
20 |
-
|
21 |
-
_MIN_RATE = 0.5
|
22 |
-
|
23 |
-
def __init__(self, max_rate, clock, min_rate=_MIN_RATE):
|
24 |
-
self._fill_rate = None
|
25 |
-
self._max_capacity = None
|
26 |
-
self._current_capacity = 0
|
27 |
-
self._clock = clock
|
28 |
-
self._last_timestamp = None
|
29 |
-
self._min_rate = min_rate
|
30 |
-
self._lock = threading.Lock()
|
31 |
-
self._new_fill_rate_condition = threading.Condition(self._lock)
|
32 |
-
self.max_rate = max_rate
|
33 |
-
|
34 |
-
@property
|
35 |
-
def max_rate(self):
|
36 |
-
return self._fill_rate
|
37 |
-
|
38 |
-
@max_rate.setter
|
39 |
-
def max_rate(self, value):
|
40 |
-
with self._new_fill_rate_condition:
|
41 |
-
# Before we can change the rate we need to fill any pending
|
42 |
-
# tokens we might have based on the current rate. If we don't
|
43 |
-
# do this it means everything since the last recorded timestamp
|
44 |
-
# will accumulate at the rate we're about to set which isn't
|
45 |
-
# correct.
|
46 |
-
self._refill()
|
47 |
-
self._fill_rate = max(value, self._min_rate)
|
48 |
-
if value >= 1:
|
49 |
-
self._max_capacity = value
|
50 |
-
else:
|
51 |
-
self._max_capacity = 1
|
52 |
-
# If we're scaling down, we also can't have a capacity that's
|
53 |
-
# more than our max_capacity.
|
54 |
-
self._current_capacity = min(
|
55 |
-
self._current_capacity, self._max_capacity
|
56 |
-
)
|
57 |
-
self._new_fill_rate_condition.notify()
|
58 |
-
|
59 |
-
@property
|
60 |
-
def max_capacity(self):
|
61 |
-
return self._max_capacity
|
62 |
-
|
63 |
-
@property
|
64 |
-
def available_capacity(self):
|
65 |
-
return self._current_capacity
|
66 |
-
|
67 |
-
def acquire(self, amount=1, block=True):
|
68 |
-
"""Acquire token or return amount of time until next token available.
|
69 |
-
|
70 |
-
If block is True, then this method will block until there's sufficient
|
71 |
-
capacity to acquire the desired amount.
|
72 |
-
|
73 |
-
If block is False, then this method will return True is capacity
|
74 |
-
was successfully acquired, False otherwise.
|
75 |
-
|
76 |
-
"""
|
77 |
-
with self._new_fill_rate_condition:
|
78 |
-
return self._acquire(amount=amount, block=block)
|
79 |
-
|
80 |
-
def _acquire(self, amount, block):
|
81 |
-
self._refill()
|
82 |
-
if amount <= self._current_capacity:
|
83 |
-
self._current_capacity -= amount
|
84 |
-
return True
|
85 |
-
else:
|
86 |
-
if not block:
|
87 |
-
raise CapacityNotAvailableError()
|
88 |
-
# Not enough capacity.
|
89 |
-
sleep_amount = self._sleep_amount(amount)
|
90 |
-
while sleep_amount > 0:
|
91 |
-
# Until python3.2, wait() always returned None so we can't
|
92 |
-
# tell if a timeout occurred waiting on the cond var.
|
93 |
-
# Because of this we'll unconditionally call _refill().
|
94 |
-
# The downside to this is that we were waken up via
|
95 |
-
# a notify(), we're calling unnecessarily calling _refill() an
|
96 |
-
# extra time.
|
97 |
-
self._new_fill_rate_condition.wait(sleep_amount)
|
98 |
-
self._refill()
|
99 |
-
sleep_amount = self._sleep_amount(amount)
|
100 |
-
self._current_capacity -= amount
|
101 |
-
return True
|
102 |
-
|
103 |
-
def _sleep_amount(self, amount):
|
104 |
-
return (amount - self._current_capacity) / self._fill_rate
|
105 |
-
|
106 |
-
def _refill(self):
|
107 |
-
timestamp = self._clock.current_time()
|
108 |
-
if self._last_timestamp is None:
|
109 |
-
self._last_timestamp = timestamp
|
110 |
-
return
|
111 |
-
current_capacity = self._current_capacity
|
112 |
-
fill_amount = (timestamp - self._last_timestamp) * self._fill_rate
|
113 |
-
new_capacity = min(self._max_capacity, current_capacity + fill_amount)
|
114 |
-
self._current_capacity = new_capacity
|
115 |
-
self._last_timestamp = timestamp
|
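The bucket.py module removed above implements a thread-safe token bucket for client-side throttling: capacity refills continuously at `max_rate` tokens per second, and `acquire()` either consumes tokens immediately or sleeps on a condition variable until enough have accumulated. As a quick orientation, here is a minimal, hypothetical sketch of how such a bucket could be driven; `TokenBucket`, `Clock`, and the import path come from the deleted file, while the rate and loop below are purely illustrative.

```python
import time

# Assumes the TokenBucket and Clock classes shown above are importable
# from their usual botocore location (the path this deleted file lived at).
from botocore.retries.bucket import Clock, TokenBucket

# Allow roughly 5 acquisitions per second across all callers.
bucket = TokenBucket(max_rate=5, clock=Clock())

for i in range(10):
    # Blocks until one token of capacity is available, so the loop
    # self-throttles to the configured fill rate.
    bucket.acquire(amount=1, block=True)
    print(f"request {i} sent at {time.time():.2f}")
```

Driven this way, a token becomes available roughly every 0.2 seconds, so the ten iterations spread out over about two seconds; passing `block=False` instead would raise `CapacityNotAvailableError` whenever the bucket is empty.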
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
DELETED
@@ -1,519 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
This module uses ctypes to bind a whole bunch of functions and constants from
|
3 |
-
SecureTransport. The goal here is to provide the low-level API to
|
4 |
-
SecureTransport. These are essentially the C-level functions and constants, and
|
5 |
-
they're pretty gross to work with.
|
6 |
-
|
7 |
-
This code is a bastardised version of the code found in Will Bond's oscrypto
|
8 |
-
library. An enormous debt is owed to him for blazing this trail for us. For
|
9 |
-
that reason, this code should be considered to be covered both by urllib3's
|
10 |
-
license and by oscrypto's:
|
11 |
-
|
12 |
-
Copyright (c) 2015-2016 Will Bond <[email protected]>
|
13 |
-
|
14 |
-
Permission is hereby granted, free of charge, to any person obtaining a
|
15 |
-
copy of this software and associated documentation files (the "Software"),
|
16 |
-
to deal in the Software without restriction, including without limitation
|
17 |
-
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
18 |
-
and/or sell copies of the Software, and to permit persons to whom the
|
19 |
-
Software is furnished to do so, subject to the following conditions:
|
20 |
-
|
21 |
-
The above copyright notice and this permission notice shall be included in
|
22 |
-
all copies or substantial portions of the Software.
|
23 |
-
|
24 |
-
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
25 |
-
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
26 |
-
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
27 |
-
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
28 |
-
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
29 |
-
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
30 |
-
DEALINGS IN THE SOFTWARE.
|
31 |
-
"""
|
32 |
-
from __future__ import absolute_import
|
33 |
-
|
34 |
-
import platform
|
35 |
-
from ctypes import (
|
36 |
-
CDLL,
|
37 |
-
CFUNCTYPE,
|
38 |
-
POINTER,
|
39 |
-
c_bool,
|
40 |
-
c_byte,
|
41 |
-
c_char_p,
|
42 |
-
c_int32,
|
43 |
-
c_long,
|
44 |
-
c_size_t,
|
45 |
-
c_uint32,
|
46 |
-
c_ulong,
|
47 |
-
c_void_p,
|
48 |
-
)
|
49 |
-
from ctypes.util import find_library
|
50 |
-
|
51 |
-
from ...packages.six import raise_from
|
52 |
-
|
53 |
-
if platform.system() != "Darwin":
|
54 |
-
raise ImportError("Only macOS is supported")
|
55 |
-
|
56 |
-
version = platform.mac_ver()[0]
|
57 |
-
version_info = tuple(map(int, version.split(".")))
|
58 |
-
if version_info < (10, 8):
|
59 |
-
raise OSError(
|
60 |
-
"Only OS X 10.8 and newer are supported, not %s.%s"
|
61 |
-
% (version_info[0], version_info[1])
|
62 |
-
)
|
63 |
-
|
64 |
-
|
65 |
-
def load_cdll(name, macos10_16_path):
|
66 |
-
"""Loads a CDLL by name, falling back to known path on 10.16+"""
|
67 |
-
try:
|
68 |
-
# Big Sur is technically 11 but we use 10.16 due to the Big Sur
|
69 |
-
# beta being labeled as 10.16.
|
70 |
-
if version_info >= (10, 16):
|
71 |
-
path = macos10_16_path
|
72 |
-
else:
|
73 |
-
path = find_library(name)
|
74 |
-
if not path:
|
75 |
-
raise OSError # Caught and reraised as 'ImportError'
|
76 |
-
return CDLL(path, use_errno=True)
|
77 |
-
except OSError:
|
78 |
-
raise_from(ImportError("The library %s failed to load" % name), None)
|
79 |
-
|
80 |
-
|
81 |
-
Security = load_cdll(
|
82 |
-
"Security", "/System/Library/Frameworks/Security.framework/Security"
|
83 |
-
)
|
84 |
-
CoreFoundation = load_cdll(
|
85 |
-
"CoreFoundation",
|
86 |
-
"/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
|
87 |
-
)
|
88 |
-
|
89 |
-
|
90 |
-
Boolean = c_bool
|
91 |
-
CFIndex = c_long
|
92 |
-
CFStringEncoding = c_uint32
|
93 |
-
CFData = c_void_p
|
94 |
-
CFString = c_void_p
|
95 |
-
CFArray = c_void_p
|
96 |
-
CFMutableArray = c_void_p
|
97 |
-
CFDictionary = c_void_p
|
98 |
-
CFError = c_void_p
|
99 |
-
CFType = c_void_p
|
100 |
-
CFTypeID = c_ulong
|
101 |
-
|
102 |
-
CFTypeRef = POINTER(CFType)
|
103 |
-
CFAllocatorRef = c_void_p
|
104 |
-
|
105 |
-
OSStatus = c_int32
|
106 |
-
|
107 |
-
CFDataRef = POINTER(CFData)
|
108 |
-
CFStringRef = POINTER(CFString)
|
109 |
-
CFArrayRef = POINTER(CFArray)
|
110 |
-
CFMutableArrayRef = POINTER(CFMutableArray)
|
111 |
-
CFDictionaryRef = POINTER(CFDictionary)
|
112 |
-
CFArrayCallBacks = c_void_p
|
113 |
-
CFDictionaryKeyCallBacks = c_void_p
|
114 |
-
CFDictionaryValueCallBacks = c_void_p
|
115 |
-
|
116 |
-
SecCertificateRef = POINTER(c_void_p)
|
117 |
-
SecExternalFormat = c_uint32
|
118 |
-
SecExternalItemType = c_uint32
|
119 |
-
SecIdentityRef = POINTER(c_void_p)
|
120 |
-
SecItemImportExportFlags = c_uint32
|
121 |
-
SecItemImportExportKeyParameters = c_void_p
|
122 |
-
SecKeychainRef = POINTER(c_void_p)
|
123 |
-
SSLProtocol = c_uint32
|
124 |
-
SSLCipherSuite = c_uint32
|
125 |
-
SSLContextRef = POINTER(c_void_p)
|
126 |
-
SecTrustRef = POINTER(c_void_p)
|
127 |
-
SSLConnectionRef = c_uint32
|
128 |
-
SecTrustResultType = c_uint32
|
129 |
-
SecTrustOptionFlags = c_uint32
|
130 |
-
SSLProtocolSide = c_uint32
|
131 |
-
SSLConnectionType = c_uint32
|
132 |
-
SSLSessionOption = c_uint32
|
133 |
-
|
134 |
-
|
135 |
-
try:
|
136 |
-
Security.SecItemImport.argtypes = [
|
137 |
-
CFDataRef,
|
138 |
-
CFStringRef,
|
139 |
-
POINTER(SecExternalFormat),
|
140 |
-
POINTER(SecExternalItemType),
|
141 |
-
SecItemImportExportFlags,
|
142 |
-
POINTER(SecItemImportExportKeyParameters),
|
143 |
-
SecKeychainRef,
|
144 |
-
POINTER(CFArrayRef),
|
145 |
-
]
|
146 |
-
Security.SecItemImport.restype = OSStatus
|
147 |
-
|
148 |
-
Security.SecCertificateGetTypeID.argtypes = []
|
149 |
-
Security.SecCertificateGetTypeID.restype = CFTypeID
|
150 |
-
|
151 |
-
Security.SecIdentityGetTypeID.argtypes = []
|
152 |
-
Security.SecIdentityGetTypeID.restype = CFTypeID
|
153 |
-
|
154 |
-
Security.SecKeyGetTypeID.argtypes = []
|
155 |
-
Security.SecKeyGetTypeID.restype = CFTypeID
|
156 |
-
|
157 |
-
Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef]
|
158 |
-
Security.SecCertificateCreateWithData.restype = SecCertificateRef
|
159 |
-
|
160 |
-
Security.SecCertificateCopyData.argtypes = [SecCertificateRef]
|
161 |
-
Security.SecCertificateCopyData.restype = CFDataRef
|
162 |
-
|
163 |
-
Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
|
164 |
-
Security.SecCopyErrorMessageString.restype = CFStringRef
|
165 |
-
|
166 |
-
Security.SecIdentityCreateWithCertificate.argtypes = [
|
167 |
-
CFTypeRef,
|
168 |
-
SecCertificateRef,
|
169 |
-
POINTER(SecIdentityRef),
|
170 |
-
]
|
171 |
-
Security.SecIdentityCreateWithCertificate.restype = OSStatus
|
172 |
-
|
173 |
-
Security.SecKeychainCreate.argtypes = [
|
174 |
-
c_char_p,
|
175 |
-
c_uint32,
|
176 |
-
c_void_p,
|
177 |
-
Boolean,
|
178 |
-
c_void_p,
|
179 |
-
POINTER(SecKeychainRef),
|
180 |
-
]
|
181 |
-
Security.SecKeychainCreate.restype = OSStatus
|
182 |
-
|
183 |
-
Security.SecKeychainDelete.argtypes = [SecKeychainRef]
|
184 |
-
Security.SecKeychainDelete.restype = OSStatus
|
185 |
-
|
186 |
-
Security.SecPKCS12Import.argtypes = [
|
187 |
-
CFDataRef,
|
188 |
-
CFDictionaryRef,
|
189 |
-
POINTER(CFArrayRef),
|
190 |
-
]
|
191 |
-
Security.SecPKCS12Import.restype = OSStatus
|
192 |
-
|
193 |
-
SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
|
194 |
-
SSLWriteFunc = CFUNCTYPE(
|
195 |
-
OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)
|
196 |
-
)
|
197 |
-
|
198 |
-
Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc]
|
199 |
-
Security.SSLSetIOFuncs.restype = OSStatus
|
200 |
-
|
201 |
-
Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t]
|
202 |
-
Security.SSLSetPeerID.restype = OSStatus
|
203 |
-
|
204 |
-
Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef]
|
205 |
-
Security.SSLSetCertificate.restype = OSStatus
|
206 |
-
|
207 |
-
Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean]
|
208 |
-
Security.SSLSetCertificateAuthorities.restype = OSStatus
|
209 |
-
|
210 |
-
Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef]
|
211 |
-
Security.SSLSetConnection.restype = OSStatus
|
212 |
-
|
213 |
-
Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t]
|
214 |
-
Security.SSLSetPeerDomainName.restype = OSStatus
|
215 |
-
|
216 |
-
Security.SSLHandshake.argtypes = [SSLContextRef]
|
217 |
-
Security.SSLHandshake.restype = OSStatus
|
218 |
-
|
219 |
-
Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
|
220 |
-
Security.SSLRead.restype = OSStatus
|
221 |
-
|
222 |
-
Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
|
223 |
-
Security.SSLWrite.restype = OSStatus
|
224 |
-
|
225 |
-
Security.SSLClose.argtypes = [SSLContextRef]
|
226 |
-
Security.SSLClose.restype = OSStatus
|
227 |
-
|
228 |
-
Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
|
229 |
-
Security.SSLGetNumberSupportedCiphers.restype = OSStatus
|
230 |
-
|
231 |
-
Security.SSLGetSupportedCiphers.argtypes = [
|
232 |
-
SSLContextRef,
|
233 |
-
POINTER(SSLCipherSuite),
|
234 |
-
POINTER(c_size_t),
|
235 |
-
]
|
236 |
-
Security.SSLGetSupportedCiphers.restype = OSStatus
|
237 |
-
|
238 |
-
Security.SSLSetEnabledCiphers.argtypes = [
|
239 |
-
SSLContextRef,
|
240 |
-
POINTER(SSLCipherSuite),
|
241 |
-
c_size_t,
|
242 |
-
]
|
243 |
-
Security.SSLSetEnabledCiphers.restype = OSStatus
|
244 |
-
|
245 |
-
Security.SSLGetNumberEnabledCiphers.argtype = [SSLContextRef, POINTER(c_size_t)]
|
246 |
-
Security.SSLGetNumberEnabledCiphers.restype = OSStatus
|
247 |
-
|
248 |
-
Security.SSLGetEnabledCiphers.argtypes = [
|
249 |
-
SSLContextRef,
|
250 |
-
POINTER(SSLCipherSuite),
|
251 |
-
POINTER(c_size_t),
|
252 |
-
]
|
253 |
-
Security.SSLGetEnabledCiphers.restype = OSStatus
|
254 |
-
|
255 |
-
Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)]
|
256 |
-
Security.SSLGetNegotiatedCipher.restype = OSStatus
|
257 |
-
|
258 |
-
Security.SSLGetNegotiatedProtocolVersion.argtypes = [
|
259 |
-
SSLContextRef,
|
260 |
-
POINTER(SSLProtocol),
|
261 |
-
]
|
262 |
-
Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
|
263 |
-
|
264 |
-
Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)]
|
265 |
-
Security.SSLCopyPeerTrust.restype = OSStatus
|
266 |
-
|
267 |
-
Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef]
|
268 |
-
Security.SecTrustSetAnchorCertificates.restype = OSStatus
|
269 |
-
|
270 |
-
Security.SecTrustSetAnchorCertificatesOnly.argstypes = [SecTrustRef, Boolean]
|
271 |
-
Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
|
272 |
-
|
273 |
-
Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)]
|
274 |
-
Security.SecTrustEvaluate.restype = OSStatus
|
275 |
-
|
276 |
-
Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef]
|
277 |
-
Security.SecTrustGetCertificateCount.restype = CFIndex
|
278 |
-
|
279 |
-
Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex]
|
280 |
-
Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
|
281 |
-
|
282 |
-
Security.SSLCreateContext.argtypes = [
|
283 |
-
CFAllocatorRef,
|
284 |
-
SSLProtocolSide,
|
285 |
-
SSLConnectionType,
|
286 |
-
]
|
287 |
-
Security.SSLCreateContext.restype = SSLContextRef
|
288 |
-
|
289 |
-
Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean]
|
290 |
-
Security.SSLSetSessionOption.restype = OSStatus
|
291 |
-
|
292 |
-
Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol]
|
293 |
-
Security.SSLSetProtocolVersionMin.restype = OSStatus
|
294 |
-
|
295 |
-
Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol]
|
296 |
-
Security.SSLSetProtocolVersionMax.restype = OSStatus
|
297 |
-
|
298 |
-
try:
|
299 |
-
Security.SSLSetALPNProtocols.argtypes = [SSLContextRef, CFArrayRef]
|
300 |
-
Security.SSLSetALPNProtocols.restype = OSStatus
|
301 |
-
except AttributeError:
|
302 |
-
# Supported only in 10.12+
|
303 |
-
pass
|
304 |
-
|
305 |
-
Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
|
306 |
-
Security.SecCopyErrorMessageString.restype = CFStringRef
|
307 |
-
|
308 |
-
Security.SSLReadFunc = SSLReadFunc
|
309 |
-
Security.SSLWriteFunc = SSLWriteFunc
|
310 |
-
Security.SSLContextRef = SSLContextRef
|
311 |
-
Security.SSLProtocol = SSLProtocol
|
312 |
-
Security.SSLCipherSuite = SSLCipherSuite
|
313 |
-
Security.SecIdentityRef = SecIdentityRef
|
314 |
-
Security.SecKeychainRef = SecKeychainRef
|
315 |
-
Security.SecTrustRef = SecTrustRef
|
316 |
-
Security.SecTrustResultType = SecTrustResultType
|
317 |
-
Security.SecExternalFormat = SecExternalFormat
|
318 |
-
Security.OSStatus = OSStatus
|
319 |
-
|
320 |
-
Security.kSecImportExportPassphrase = CFStringRef.in_dll(
|
321 |
-
Security, "kSecImportExportPassphrase"
|
322 |
-
)
|
323 |
-
Security.kSecImportItemIdentity = CFStringRef.in_dll(
|
324 |
-
Security, "kSecImportItemIdentity"
|
325 |
-
)
|
326 |
-
|
327 |
-
# CoreFoundation time!
|
328 |
-
CoreFoundation.CFRetain.argtypes = [CFTypeRef]
|
329 |
-
CoreFoundation.CFRetain.restype = CFTypeRef
|
330 |
-
|
331 |
-
CoreFoundation.CFRelease.argtypes = [CFTypeRef]
|
332 |
-
CoreFoundation.CFRelease.restype = None
|
333 |
-
|
334 |
-
CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef]
|
335 |
-
CoreFoundation.CFGetTypeID.restype = CFTypeID
|
336 |
-
|
337 |
-
CoreFoundation.CFStringCreateWithCString.argtypes = [
|
338 |
-
CFAllocatorRef,
|
339 |
-
c_char_p,
|
340 |
-
CFStringEncoding,
|
341 |
-
]
|
342 |
-
CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
|
343 |
-
|
344 |
-
CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding]
|
345 |
-
CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
|
346 |
-
|
347 |
-
CoreFoundation.CFStringGetCString.argtypes = [
|
348 |
-
CFStringRef,
|
349 |
-
c_char_p,
|
350 |
-
CFIndex,
|
351 |
-
CFStringEncoding,
|
352 |
-
]
|
353 |
-
CoreFoundation.CFStringGetCString.restype = c_bool
|
354 |
-
|
355 |
-
CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex]
|
356 |
-
CoreFoundation.CFDataCreate.restype = CFDataRef
|
357 |
-
|
358 |
-
CoreFoundation.CFDataGetLength.argtypes = [CFDataRef]
|
359 |
-
CoreFoundation.CFDataGetLength.restype = CFIndex
|
360 |
-
|
361 |
-
CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef]
|
362 |
-
CoreFoundation.CFDataGetBytePtr.restype = c_void_p
|
363 |
-
|
364 |
-
CoreFoundation.CFDictionaryCreate.argtypes = [
|
365 |
-
CFAllocatorRef,
|
366 |
-
POINTER(CFTypeRef),
|
367 |
-
POINTER(CFTypeRef),
|
368 |
-
CFIndex,
|
369 |
-
CFDictionaryKeyCallBacks,
|
370 |
-
CFDictionaryValueCallBacks,
|
371 |
-
]
|
372 |
-
CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
|
373 |
-
|
374 |
-
CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef]
|
375 |
-
CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
|
376 |
-
|
377 |
-
CoreFoundation.CFArrayCreate.argtypes = [
|
-        CFAllocatorRef,
-        POINTER(CFTypeRef),
-        CFIndex,
-        CFArrayCallBacks,
-    ]
-    CoreFoundation.CFArrayCreate.restype = CFArrayRef
-
-    CoreFoundation.CFArrayCreateMutable.argtypes = [
-        CFAllocatorRef,
-        CFIndex,
-        CFArrayCallBacks,
-    ]
-    CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
-
-    CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p]
-    CoreFoundation.CFArrayAppendValue.restype = None
-
-    CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef]
-    CoreFoundation.CFArrayGetCount.restype = CFIndex
-
-    CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
-    CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
-
-    CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
-        CoreFoundation, "kCFAllocatorDefault"
-    )
-    CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(
-        CoreFoundation, "kCFTypeArrayCallBacks"
-    )
-    CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
-        CoreFoundation, "kCFTypeDictionaryKeyCallBacks"
-    )
-    CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
-        CoreFoundation, "kCFTypeDictionaryValueCallBacks"
-    )
-
-    CoreFoundation.CFTypeRef = CFTypeRef
-    CoreFoundation.CFArrayRef = CFArrayRef
-    CoreFoundation.CFStringRef = CFStringRef
-    CoreFoundation.CFDictionaryRef = CFDictionaryRef
-
-except (AttributeError):
-    raise ImportError("Error initializing ctypes")
-
-
-class CFConst(object):
-    """
-    A class object that acts as essentially a namespace for CoreFoundation
-    constants.
-    """
-
-    kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
-
-
-class SecurityConst(object):
-    """
-    A class object that acts as essentially a namespace for Security constants.
-    """
-
-    kSSLSessionOptionBreakOnServerAuth = 0
-
-    kSSLProtocol2 = 1
-    kSSLProtocol3 = 2
-    kTLSProtocol1 = 4
-    kTLSProtocol11 = 7
-    kTLSProtocol12 = 8
-    # SecureTransport does not support TLS 1.3 even if there's a constant for it
-    kTLSProtocol13 = 10
-    kTLSProtocolMaxSupported = 999
-
-    kSSLClientSide = 1
-    kSSLStreamType = 0
-
-    kSecFormatPEMSequence = 10
-
-    kSecTrustResultInvalid = 0
-    kSecTrustResultProceed = 1
-    # This gap is present on purpose: this was kSecTrustResultConfirm, which
-    # is deprecated.
-    kSecTrustResultDeny = 3
-    kSecTrustResultUnspecified = 4
-    kSecTrustResultRecoverableTrustFailure = 5
-    kSecTrustResultFatalTrustFailure = 6
-    kSecTrustResultOtherError = 7
-
-    errSSLProtocol = -9800
-    errSSLWouldBlock = -9803
-    errSSLClosedGraceful = -9805
-    errSSLClosedNoNotify = -9816
-    errSSLClosedAbort = -9806
-
-    errSSLXCertChainInvalid = -9807
-    errSSLCrypto = -9809
-    errSSLInternal = -9810
-    errSSLCertExpired = -9814
-    errSSLCertNotYetValid = -9815
-    errSSLUnknownRootCert = -9812
-    errSSLNoRootCert = -9813
-    errSSLHostNameMismatch = -9843
-    errSSLPeerHandshakeFail = -9824
-    errSSLPeerUserCancelled = -9839
-    errSSLWeakPeerEphemeralDHKey = -9850
-    errSSLServerAuthCompleted = -9841
-    errSSLRecordOverflow = -9847
-
-    errSecVerifyFailed = -67808
-    errSecNoTrustSettings = -25263
-    errSecItemNotFound = -25300
-    errSecInvalidTrustSettings = -25262
-
-    # Cipher suites. We only pick the ones our default cipher string allows.
-    # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values
-    TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
-    TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
-    TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
-    TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
-    TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9
-    TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8
-    TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
-    TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
-    TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
-    TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
-    TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
-    TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
-    TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
-    TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
-    TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
-    TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
-    TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
-    TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
-    TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
-    TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
-    TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
-    TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
-    TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
-    TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
-    TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
-    TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
-    TLS_AES_128_GCM_SHA256 = 0x1301
-    TLS_AES_256_GCM_SHA384 = 0x1302
-    TLS_AES_128_CCM_8_SHA256 = 0x1305
-    TLS_AES_128_CCM_SHA256 = 0x1304
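
Note: the deleted bindings above expose the CoreFoundation CFArray calls to Python through ctypes. For reference, the same calls made directly against the C API look roughly like the sketch below. This is illustrative only: the file name, the "hello" string, and the build command are assumptions, and it builds only on macOS against the CoreFoundation framework.

// demo_cfarray.cpp (hypothetical file name): minimal CFArray round trip.
// Build on macOS with: clang++ demo_cfarray.cpp -framework CoreFoundation
#include <CoreFoundation/CoreFoundation.h>
#include <cstdio>

int main()
{
    // CFArrayCreateMutable(CFAllocatorRef, CFIndex capacity, const CFArrayCallBacks*)
    CFMutableArrayRef arr =
        CFArrayCreateMutable(kCFAllocatorDefault, 0, &kCFTypeArrayCallBacks);

    // kCFStringEncodingUTF8 is the 0x08000100 constant wrapped by CFConst above.
    CFStringRef s =
        CFStringCreateWithCString(kCFAllocatorDefault, "hello", kCFStringEncodingUTF8);

    CFArrayAppendValue(arr, s);   // the array retains its own reference
    CFRelease(s);

    std::printf("count = %ld\n", (long)CFArrayGetCount(arr));

    const void *first = CFArrayGetValueAtIndex(arr, 0);  // borrowed pointer, not owned
    (void)first;

    CFRelease(arr);
    return 0;
}

The ctypes declarations in the deleted file mirror these signatures one to one, which is why the argtypes/restype lists matter: without them, ctypes would truncate the 64-bit pointers these functions return.
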
spaces/CVPR/LIVE/thrust/thrust/iterator/detail/tuple_of_iterator_references.h
DELETED
@@ -1,263 +0,0 @@
-/*
- * Copyright 2008-2018 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/tuple.h>
-#include <thrust/pair.h>
-#include <thrust/detail/reference_forward_declaration.h>
-
-namespace thrust
-{
-namespace detail
-{
-
-
-template<
-    typename T0, typename T1, typename T2,
-    typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8,
-    typename T9
->
-class tuple_of_iterator_references
-    : public thrust::tuple<T0,T1,T2,T3,T4,T5,T6,T7,T8,T9>
-{
-private:
-    typedef thrust::tuple<T0,T1,T2,T3,T4,T5,T6,T7,T8,T9> super_t;
-
-public:
-    // allow implicit construction from tuple<refs>
-    inline __host__ __device__
-    tuple_of_iterator_references(const super_t &other)
-        : super_t(other)
-    {}
-
-    // allow assignment from tuples
-    // XXX might be worthwhile to guard this with an enable_if is_assignable
-    __thrust_exec_check_disable__
-    template<typename U1, typename U2>
-    inline __host__ __device__
-    tuple_of_iterator_references &operator=(const detail::cons<U1,U2> &other)
-    {
-        super_t::operator=(other);
-        return *this;
-    }
-
-    // allow assignment from pairs
-    // XXX might be worthwhile to guard this with an enable_if is_assignable
-    __thrust_exec_check_disable__
-    template<typename U1, typename U2>
-    inline __host__ __device__
-    tuple_of_iterator_references &operator=(const thrust::pair<U1,U2> &other)
-    {
-        super_t::operator=(other);
-        return *this;
-    }
-
-    // allow assignment from reference<tuple>
-    // XXX perhaps we should generalize to reference<T>
-    //     we could captures reference<pair> this way
-    __thrust_exec_check_disable__
-    template<typename U0, typename U1, typename U2,
-             typename U3, typename U4, typename U5,
-             typename U6, typename U7, typename U8,
-             typename U9,
-             typename Pointer, typename Derived>
-    inline __host__ __device__
-    // XXX gcc-4.2 crashes on is_assignable
-    //     typename thrust::detail::enable_if<
-    //       thrust::detail::is_assignable<
-    //         super_t,
-    //         const thrust::tuple<U0,U1,U2,U3,U4,U5,U6,U7,U8,U9>
-    //       >::value,
-    //       tuple_of_iterator_references &
-    //     >::type
-    tuple_of_iterator_references &
-    operator=(const thrust::reference<thrust::tuple<U0,U1,U2,U3,U4,U5,U6,U7,U8,U9>, Pointer, Derived> &other)
-    {
-        typedef thrust::tuple<U0,U1,U2,U3,U4,U5,U6,U7,U8,U9> tuple_type;
-
-        // XXX perhaps this could be accelerated
-        tuple_type other_tuple = other;
-        super_t::operator=(other_tuple);
-        return *this;
-    }
-
-
-    // duplicate thrust::tuple's constructors
-    inline __host__ __device__
-    tuple_of_iterator_references() {}
-
-    inline __host__ __device__
-    tuple_of_iterator_references(typename access_traits<T0>::parameter_type t0)
-        : super_t(t0,
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()))
-    {}
-
-    inline __host__ __device__
-    tuple_of_iterator_references(typename access_traits<T0>::parameter_type t0,
-                                 typename access_traits<T1>::parameter_type t1)
-        : super_t(t0, t1,
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()))
-    {}
-
-    inline __host__ __device__
-    tuple_of_iterator_references(typename access_traits<T0>::parameter_type t0,
-                                 typename access_traits<T1>::parameter_type t1,
-                                 typename access_traits<T2>::parameter_type t2)
-        : super_t(t0, t1, t2,
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()))
-    {}
-
-    inline __host__ __device__
-    tuple_of_iterator_references(typename access_traits<T0>::parameter_type t0,
-                                 typename access_traits<T1>::parameter_type t1,
-                                 typename access_traits<T2>::parameter_type t2,
-                                 typename access_traits<T3>::parameter_type t3)
-        : super_t(t0, t1, t2, t3,
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()))
-    {}
-
-    inline __host__ __device__
-    tuple_of_iterator_references(typename access_traits<T0>::parameter_type t0,
-                                 typename access_traits<T1>::parameter_type t1,
-                                 typename access_traits<T2>::parameter_type t2,
-                                 typename access_traits<T3>::parameter_type t3,
-                                 typename access_traits<T4>::parameter_type t4)
-        : super_t(t0, t1, t2, t3, t4,
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()))
-    {}
-
-    inline __host__ __device__
-    tuple_of_iterator_references(typename access_traits<T0>::parameter_type t0,
-                                 typename access_traits<T1>::parameter_type t1,
-                                 typename access_traits<T2>::parameter_type t2,
-                                 typename access_traits<T3>::parameter_type t3,
-                                 typename access_traits<T4>::parameter_type t4,
-                                 typename access_traits<T5>::parameter_type t5)
-        : super_t(t0, t1, t2, t3, t4, t5,
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()))
-    {}
-
-    inline __host__ __device__
-    tuple_of_iterator_references(typename access_traits<T0>::parameter_type t0,
-                                 typename access_traits<T1>::parameter_type t1,
-                                 typename access_traits<T2>::parameter_type t2,
-                                 typename access_traits<T3>::parameter_type t3,
-                                 typename access_traits<T4>::parameter_type t4,
-                                 typename access_traits<T5>::parameter_type t5,
-                                 typename access_traits<T6>::parameter_type t6)
-        : super_t(t0, t1, t2, t3, t4, t5, t6,
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()))
-    {}
-
-    inline __host__ __device__
-    tuple_of_iterator_references(typename access_traits<T0>::parameter_type t0,
-                                 typename access_traits<T1>::parameter_type t1,
-                                 typename access_traits<T2>::parameter_type t2,
-                                 typename access_traits<T3>::parameter_type t3,
-                                 typename access_traits<T4>::parameter_type t4,
-                                 typename access_traits<T5>::parameter_type t5,
-                                 typename access_traits<T6>::parameter_type t6,
-                                 typename access_traits<T7>::parameter_type t7)
-        : super_t(t0, t1, t2, t3, t4, t5, t6, t7,
-                  static_cast<const null_type&>(null_type()),
-                  static_cast<const null_type&>(null_type()))
-    {}
-
-    inline __host__ __device__
-    tuple_of_iterator_references(typename access_traits<T0>::parameter_type t0,
-                                 typename access_traits<T1>::parameter_type t1,
-                                 typename access_traits<T2>::parameter_type t2,
-                                 typename access_traits<T3>::parameter_type t3,
-                                 typename access_traits<T4>::parameter_type t4,
-                                 typename access_traits<T5>::parameter_type t5,
-                                 typename access_traits<T6>::parameter_type t6,
-                                 typename access_traits<T7>::parameter_type t7,
-                                 typename access_traits<T8>::parameter_type t8)
-        : super_t(t0, t1, t2, t3, t4, t5, t6, t7, t8,
-                  static_cast<const null_type&>(null_type()))
-    {}
-
-    inline __host__ __device__
-    tuple_of_iterator_references(typename access_traits<T0>::parameter_type t0,
-                                 typename access_traits<T1>::parameter_type t1,
-                                 typename access_traits<T2>::parameter_type t2,
-                                 typename access_traits<T3>::parameter_type t3,
-                                 typename access_traits<T4>::parameter_type t4,
-                                 typename access_traits<T5>::parameter_type t5,
-                                 typename access_traits<T6>::parameter_type t6,
-                                 typename access_traits<T7>::parameter_type t7,
-                                 typename access_traits<T8>::parameter_type t8,
-                                 typename access_traits<T9>::parameter_type t9)
-        : super_t(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9)
-    {}
-};
-
-
-// this overload of swap() permits swapping tuple_of_iterator_references returned as temporaries from
-// iterator dereferences
-template<
-    typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9,
-    typename U0, typename U1, typename U2, typename U3, typename U4, typename U5, typename U6, typename U7, typename U8, typename U9
->
-inline __host__ __device__
-void swap(tuple_of_iterator_references<T0,T1,T2,T3,T4,T5,T6,T7,T8,T9> x,
-          tuple_of_iterator_references<U0,U1,U2,U3,U4,U5,U6,U7,U8,U9> y)
-{
-    x.swap(y);
-}
-
-
-} // end detail
-} // end thrust
-
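
Note: this deleted header is the type a dereferenced thrust::zip_iterator hands back, which is why assigning a tuple of plain values through such an iterator writes into the underlying sequences (the operator= overloads above do the element-wise stores). A minimal sketch of that behaviour follows; it is illustrative only, the vector sizes and values are made up, and it assumes compilation with nvcc or another configured Thrust backend.

// Hypothetical example (not part of the deleted file): writing through a
// zip_iterator relies on tuple_of_iterator_references' assignment operators.
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

int main()
{
    thrust::device_vector<int>   a(3, 1);
    thrust::device_vector<float> b(3, 2.0f);

    // Dereferencing `zipped` yields a tuple_of_iterator_references.
    auto zipped = thrust::make_zip_iterator(thrust::make_tuple(a.begin(), b.begin()));

    // Assigning a tuple of values through it stores 7 into a[0] and 3.5f into b[0].
    *zipped = thrust::make_tuple(7, 3.5f);

    return 0;
}
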
spaces/CVPR/LIVE/thrust/thrust/per_device_resource.h
DELETED
@@ -1,104 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/cpp11_required.h>
-
-#if THRUST_CPP_DIALECT >= 2011
-
-#include <thrust/system/detail/generic/per_device_resource.h>
-#include <thrust/system/detail/adl/per_device_resource.h>
-#include <thrust/mr/allocator.h>
-
-#include <thrust/detail/execution_policy.h>
-#include <thrust/mr/allocator.h>
-
-namespace thrust
-{
-
-/*! Returns a global instance of \p MR for the current device of the provided system.
- *
- *  \tparam MR type of a memory resource to get an instance from. Must be \p DefaultConstructible.
- *  \param system execution policy for which the resource is requested.
- *  \returns a pointer to a global instance of \p MR for the current device.
- */
-template<typename MR, typename DerivedPolicy>
-__host__
-MR * get_per_device_resource(const thrust::detail::execution_policy_base<DerivedPolicy> & system)
-{
-    using thrust::system::detail::generic::get_per_device_resource;
-
-    return get_per_device_resource<MR>(
-        thrust::detail::derived_cast(
-            thrust::detail::strip_const(system)));
-}
-
-/*! A helper allocator class that uses global per device instances of a given upstream memory resource. Requires the memory
- *  resource to be default constructible.
- *
- *  \tparam T the type that will be allocated by this allocator.
- *  \tparam MR the upstream memory resource to use for memory allocation. Must derive from
- *      \p thrust::mr::memory_resource and must be \p final.
- *  \tparam ExecutionPolicy the execution policy of the system to be used to retrieve the resource for the current device.
- */
-template<typename T, typename Upstream, typename ExecutionPolicy>
-class per_device_allocator : public thrust::mr::allocator<T, Upstream>
-{
-    typedef thrust::mr::allocator<T, Upstream> base;
-
-public:
-    /*! The \p rebind metafunction provides the type of an \p per_device_allocator instantiated with another type.
-     *
-     *  \tparam U the other type to use for instantiation.
-     */
-    template<typename U>
-    struct rebind
-    {
-        /*! The typedef \p other gives the type of the rebound \p per_device_allocator.
-         */
-        typedef per_device_allocator<U, Upstream, ExecutionPolicy> other;
-    };
-
-    /*! Default constructor. Uses \p get_global_resource to get the global instance of \p Upstream and initializes the
-     *  \p allocator base subobject with that resource.
-     */
-    __host__
-    per_device_allocator() : base(get_per_device_resource<Upstream>(ExecutionPolicy()))
-    {
-    }
-
-    /*! Copy constructor. Copies the memory resource pointer. */
-    __host__ __device__
-    per_device_allocator(const per_device_allocator & other)
-        : base(other) {}
-
-    /*! Conversion constructor from an allocator of a different type. Copies the memory resource pointer. */
-    template<typename U>
-    __host__ __device__
-    per_device_allocator(const per_device_allocator<U, Upstream, ExecutionPolicy> & other)
-        : base(other) {}
-
-    /*! Destructor. */
-    __host__ __device__
-    ~per_device_allocator() {}
-};
-
-
-} // end namespace thrust
-
-#endif // THRUST_CPP_DIALECT >= 2011
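
Note: the deleted header declares get_per_device_resource and per_device_allocator. A minimal sketch of the first function follows; it is illustrative only, it assumes the CUDA device backend and compilation with nvcc, and thrust::system::cuda::memory_resource is used purely as an example of a default-constructible resource type.

// Hypothetical example (not part of the deleted file): fetch the per-device
// singleton of a resource type and allocate through it.
#include <thrust/per_device_resource.h>
#include <thrust/system/cuda/memory_resource.h>
#include <thrust/execution_policy.h>
#include <cstddef>

int main()
{
    // One global thrust::system::cuda::memory_resource instance per CUDA device.
    thrust::system::cuda::memory_resource *mr =
        thrust::get_per_device_resource<thrust::system::cuda::memory_resource>(
            thrust::device);

    // Allocate and free 1 KiB of device memory through that resource.
    auto p = mr->allocate(std::size_t(1024));
    mr->deallocate(p, std::size_t(1024));
    return 0;
}

The per_device_allocator class above wraps the same lookup so that a container such as thrust::device_vector can be parameterized with an allocator that always draws from the current device's resource instance.
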
spaces/CVPR/LIVE/thrust/thrust/system/cuda/config.h
DELETED
@@ -1,80 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-#pragma once
-
-#include <thrust/detail/config.h>
-
-#if defined(__CUDACC__)
-#  if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__>= 350 && defined(__CUDACC_RDC__))
-#    define __THRUST_HAS_CUDART__ 1
-#    define THRUST_RUNTIME_FUNCTION __host__ __device__ __forceinline__
-#  else
-#    define __THRUST_HAS_CUDART__ 0
-#    define THRUST_RUNTIME_FUNCTION __host__ __forceinline__
-#  endif
-#else
-#  define __THRUST_HAS_CUDART__ 0
-#  define THRUST_RUNTIME_FUNCTION __host__ __forceinline__
-#endif
-
-#ifdef __CUDA_ARCH__
-#define THRUST_DEVICE_CODE
-#endif
-
-#ifdef THRUST_AGENT_ENTRY_NOINLINE
-#define THRUST_AGENT_ENTRY_INLINE_ATTR __noinline__
-#else
-#define THRUST_AGENT_ENTRY_INLINE_ATTR __forceinline__
-#endif
-
-#define THRUST_DEVICE_FUNCTION __device__ __forceinline__
-#define THRUST_HOST_FUNCTION __host__ __forceinline__
-#define THRUST_FUNCTION __host__ __device__ __forceinline__
-#if 0
-#define THRUST_ARGS(...) __VA_ARGS__
-#define THRUST_STRIP_PARENS(X) X
-#define THRUST_AGENT_ENTRY(ARGS) THRUST_FUNCTION static void entry(THRUST_STRIP_PARENS(THRUST_ARGS ARGS))
-#else
-#define THRUST_AGENT_ENTRY(...) THRUST_AGENT_ENTRY_INLINE_ATTR __device__ static void entry(__VA_ARGS__)
-#endif
-
-#ifdef THRUST_DEBUG_SYNC
-#define THRUST_DEBUG_SYNC_FLAG true
-#else
-#define THRUST_DEBUG_SYNC_FLAG false
-#endif
-
-#define THRUST_CUB_NS_PREFIX namespace thrust { namespace cuda_cub {
-#define THRUST_CUB_NS_POSTFIX } }
-
-#ifndef THRUST_IGNORE_CUB_VERSION_CHECK
-#include <thrust/version.h>
-#include <cub/util_namespace.cuh> // This includes <cub/version.cuh> in newer releases.
-#if THRUST_VERSION != CUB_VERSION
-#error The version of CUB in your include path is not compatible with this release of Thrust. CUB is now included in the CUDA Toolkit, so you no longer need to use your own checkout of CUB. Define THRUST_IGNORE_CUB_VERSION_CHECK to ignore this.
-#endif
-#endif
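
Note: this deleted config header defines the macros Thrust's CUDA backend uses to decide whether the CUDA runtime is callable in the current compilation context. A minimal sketch of how such macros are typically consumed follows; it is illustrative only, current_device_or_minus_one is a made-up helper rather than part of Thrust, and it assumes compilation with nvcc against a matching CUDA Toolkit.

// Hypothetical helper (not part of Thrust): THRUST_RUNTIME_FUNCTION marks a
// function that may call the CUDA runtime, and __THRUST_HAS_CUDART__ guards
// the actual call site.
#include <thrust/system/cuda/config.h>
#include <cstdio>

THRUST_RUNTIME_FUNCTION int current_device_or_minus_one()
{
#if __THRUST_HAS_CUDART__
    int dev = -1;
    cudaGetDevice(&dev);   // runtime API available: host code, or device code with RDC
    return dev;
#else
    return -1;             // no CUDA runtime in this compilation context
#endif
}

int main()
{
    std::printf("device = %d\n", current_device_or_minus_one());
    return 0;
}
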