Commit
·
b063d89
1
Parent(s):
3da387e
Update parquet files (step 61 of 397)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Accessdata Password Recovery Toolkit Crack Pros and Cons of Using It for Password Recovery.md +0 -128
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/AnyDesk Client The Ultimate Guide to Downloading and Using the Best Remote Desktop Software.md +0 -41
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Diner De ConsLe Movie Download __LINK__ 720p Movie.md +0 -21
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fizika masalalar yechish usullari pdf Oquv qollanma va namunalar.md +0 -105
- spaces/1gistliPinn/ChatGPT4/Examples/FoneLab 9.1.58 Crack With Activation Number Free Download 2019.md +0 -32
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Unlock Tool The Best Way to Play PUBG Mobile at 90 FPS.md +0 -115
- spaces/1phancelerku/anime-remove-background/Download Monster in My Pocket APK and Play the Classic Atari Remake.md +0 -140
- spaces/1phancelerku/anime-remove-background/Download Spider-Man 2000 APK - The Classic Web-Slinging Adventure on Android.md +0 -96
- spaces/1phancelerku/anime-remove-background/FIFA Mobile 22 Hack How to Unlock All Players Kits and Stadiums for Free.md +0 -101
- spaces/2ndelement/voicevox/voicevox_engine/utility/connect_base64_waves.py +0 -60
- spaces/AIFILMS/StyleGANEX/models/mtcnn/__init__.py +0 -0
- spaces/AIFILMS/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/box_utils.py +0 -238
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/variational_autoencoder/__init__.py +0 -0
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/portaspeech/portaspeech.py +0 -230
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/version.py +0 -1
- spaces/AIZero2HeroBootcamp/ChatGPTandLangchain/app.py +0 -442
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/_base_/default_runtime.py +0 -49
- spaces/Aditya9790/yolo7-object-tracking/app.py +0 -293
- spaces/Aer0xander/sd-to-diffusers/utils.py +0 -6
- spaces/Amrrs/DragGan-Inversion/torch_utils/ops/conv2d_gradfix.py +0 -225
- spaces/Amrrs/DragGan-Inversion/viz/drag_widget.py +0 -173
- spaces/Amrrs/portfolio/index.html +0 -107
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/deis.md +0 -22
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/euler.md +0 -21
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/attention_flax.py +0 -446
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/pipeline_utils.py +0 -1698
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/spectrogram_diffusion/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/coco_detection.py +0 -48
- spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py +0 -2
- spaces/Artgor/digit-draw-detect/src/utils.py +0 -105
- spaces/Artrajz/vits-simple-api/utils/nlp.py +0 -97
- spaces/Ashrafb/Tesseract-OCR/README.md +0 -13
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/enums.py +0 -85
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/mbcsgroupprober.py +0 -57
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distro/__init__.py +0 -54
- spaces/Bart92/RVC_HF/Applio-RVC-Fork/utils/README.md +0 -6
- spaces/Benson/text-generation/Examples/60 Segundos Reatomized Apk Descargar Gratis Android.md +0 -75
- spaces/Benson/text-generation/Examples/Descargar El Juego De Ftbol Apk.md +0 -79
- spaces/BetterAPI/BetterChat/src/lib/utils/concatUint8Arrays.ts +0 -12
- spaces/BiTransSciencia/www/README.md +0 -11
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/throttling.py +0 -55
- spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/packaging/_musllinux.py +0 -136
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/helpers.py +0 -1088
- spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/appengine.py +0 -314
- spaces/BillBojangeles2000/bart-large-cnn-samsum/app.py +0 -3
- spaces/Blackroot/Fancy-Audiogen/audio.py +0 -59
- spaces/Boadiwaa/Recipes/openai/api_resources/file.py +0 -131
- spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/utils.py +0 -100
- spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp +0 -43
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Accessdata Password Recovery Toolkit Crack Pros and Cons of Using It for Password Recovery.md
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Accessdata Password Recovery Toolkit Crack: What You Need to Know</h1>
|
3 |
-
<p>If you need to gain access to password-protected files, then you might have heard of Accessdata Password Recovery Toolkit (PRTK), a software that can recover passwords from encrypted files and containers. But what if you don't have a license for PRTK? Can you use a crack to bypass the activation process and use the software for free?</p>
|
4 |
-
<p>In this article, we will explain what Accessdata Password Recovery Toolkit is, how it works, how to use it, and why you should avoid using a crack for it. We will also provide some alternatives to using a crack that are safer and more reliable.</p>
|
5 |
-
<h2>Accessdata Password Recovery Toolkit Crack</h2><br /><p><b><b>Download Zip</b> ✯✯✯ <a href="https://byltly.com/2uKvuo">https://byltly.com/2uKvuo</a></b></p><br /><br />
|
6 |
-
<h2>How Accessdata Password Recovery Toolkit Works</h2>
|
7 |
-
<p>Accessdata Password Recovery Toolkit is a part of Accessdata's Forensic Toolkit (FTK), a suite of tools for digital forensics and incident response. PRTK can recover passwords from various types of encrypted files and containers, such as MS Word, PDF, TrueCrypt, BitLocker, ZIP, RAR, and many more.</p>
|
8 |
-
<p>PRTK uses different methods to recover passwords, such as brute-force, dictionary, rainbow tables, known-plaintext, and hybrid attacks. It can also create custom dictionaries and profiles based on the characteristics of the target file or container. PRTK can run multiple password recovery attacks simultaneously on different files or containers, using multiple processors and GPUs.</p>
|
9 |
-
<p>PRTK can also integrate with other tools in FTK, such as FTK Imager, Registry Viewer, FTK Lab, and AD Enterprise. This allows you to perform comprehensive analysis and investigation on encrypted data.</p>
|
10 |
-
<h2>How to Use Accessdata Password Recovery Toolkit</h2>
|
11 |
-
<h3>How to Install and Initialize the Software</h3>
|
12 |
-
<p>To use Accessdata Password Recovery Toolkit, you need to have a valid license for FTK. You can purchase a license from Exterro, the company that acquired Accessdata in 2020. You can also request a free trial or a demo from their website.</p>
|
13 |
-
<p>Once you have a license, you can download the software from Exterro's website. You will need to register an account and provide your license information. You will also need to download FTK Imager, which is required for PRTK.</p>
|
14 |
-
<p>After downloading the software, you need to install it on your computer. You will need administrator privileges to do so. You will also need to activate the software with your license key. You can do this online or offline.</p>
|
15 |
-
<p>Once the software is installed and activated, you need to initialize it for first use. You will need to configure some settings, such as the location of your dictionaries, rainbow tables, profiles, logs, etc. You will also need to update your software regularly to get the latest features and fixes.</p>
|
16 |
-
<p>How to use Accessdata Password Recovery Toolkit Crack<br />
|
17 |
-
Accessdata Password Recovery Toolkit Crack download link<br />
|
18 |
-
Accessdata Password Recovery Toolkit Crack serial key<br />
|
19 |
-
Accessdata Password Recovery Toolkit Crack activation code<br />
|
20 |
-
Accessdata Password Recovery Toolkit Crack license key<br />
|
21 |
-
Accessdata Password Recovery Toolkit Crack full version<br />
|
22 |
-
Accessdata Password Recovery Toolkit Crack free trial<br />
|
23 |
-
Accessdata Password Recovery Toolkit Crack tutorial<br />
|
24 |
-
Accessdata Password Recovery Toolkit Crack review<br />
|
25 |
-
Accessdata Password Recovery Toolkit Crack alternative<br />
|
26 |
-
Accessdata Password Recovery Toolkit Crack vs Passware Kit Forensic<br />
|
27 |
-
Accessdata Password Recovery Toolkit Crack features<br />
|
28 |
-
Accessdata Password Recovery Toolkit Crack system requirements<br />
|
29 |
-
Accessdata Password Recovery Toolkit Crack user guide<br />
|
30 |
-
Accessdata Password Recovery Toolkit Crack support<br />
|
31 |
-
Accessdata Password Recovery Toolkit Crack price<br />
|
32 |
-
Accessdata Password Recovery Toolkit Crack discount code<br />
|
33 |
-
Accessdata Password Recovery Toolkit Crack coupon code<br />
|
34 |
-
Accessdata Password Recovery Toolkit Crack refund policy<br />
|
35 |
-
Accessdata Password Recovery Toolkit Crack testimonials<br />
|
36 |
-
Accessdata Password Recovery Toolkit Crack pros and cons<br />
|
37 |
-
Accessdata Password Recovery Toolkit Crack comparison<br />
|
38 |
-
Accessdata Password Recovery Toolkit Crack benefits<br />
|
39 |
-
Accessdata Password Recovery Toolkit Crack limitations<br />
|
40 |
-
Accessdata Password Recovery Toolkit Crack installation guide<br />
|
41 |
-
Accessdata Password Recovery Toolkit Crack troubleshooting<br />
|
42 |
-
Accessdata Password Recovery Toolkit Crack update<br />
|
43 |
-
Accessdata Password Recovery Toolkit Crack upgrade<br />
|
44 |
-
Accessdata Password Recovery Toolkit Crack compatibility<br />
|
45 |
-
Accessdata Password Recovery Toolkit Crack performance<br />
|
46 |
-
Accessdata Password Recovery Toolkit Crack quality<br />
|
47 |
-
Accessdata Password Recovery Toolkit Crack reliability<br />
|
48 |
-
Accessdata Password Recovery Toolkit Crack security<br />
|
49 |
-
Accessdata Password Recovery Toolkit Crack privacy<br />
|
50 |
-
Accessdata Password Recovery Toolkit Crack warranty<br />
|
51 |
-
Accessdata Password Recovery Toolkit Crack customer service<br />
|
52 |
-
Accessdata Password Recovery Toolkit Crack feedback<br />
|
53 |
-
Accessdata Password Recovery Toolkit Crack ratings<br />
|
54 |
-
Accessdata Password Recovery Toolkit Crack success stories<br />
|
55 |
-
Accessdata Password Recovery Toolkit Crack case studies<br />
|
56 |
-
How to get Accessdata Password Recovery Toolkit Crack for free<br />
|
57 |
-
How to crack Accessdata Password Recovery Toolkit password<br />
|
58 |
-
How to recover lost password with Accessdata Password Recovery Toolkit Crack <br />
|
59 |
-
How to bypass password protection with Accessdata Password Recovery Toolkit Crack <br />
|
60 |
-
How to decrypt encrypted files with Accessdata Password Recovery Toolkit Crack <br />
|
61 |
-
How to extract password hashes with Accessdata Password Recovery Toolkit Crack <br />
|
62 |
-
How to brute force passwords with Accessdata Password Recovery Toolkit Crack <br />
|
63 |
-
How to reset passwords with Accessdata Password Recovery Toolkit Crack <br />
|
64 |
-
How to unlock accounts with Accessdata Password Recovery Toolkit Crack</p>
|
65 |
-
<h3>How to Identify Encrypted Files with FTK</h3>
|
66 |
-
<p>Before you can recover passwords from encrypted files or containers, you need to identify them first. You can use FTK Imager to scan your hard drive or an image file for encrypted files or containers. FTK Imager can detect various types of encryption algorithms and formats.</p>
|
67 |
-
<p>To use FTK Imager, you need to launch it from the Start menu or the desktop shortcut. You will see a window with four tabs: Evidence Tree, File List, Gallery View, and Hex View. You can use these tabs to view different aspects of your data.</p>
|
68 |
-
<p>To scan for encrypted files or containers, you need to add an evidence item. You can do this by clicking on the File menu and selecting Add Evidence Item. You can choose from different types of evidence items, such as Physical Drive, Logical Drive, Image File, Contents of Folder, etc.</p>
|
69 |
-
<p>After adding an evidence item, you will see it in the Evidence Tree tab. You can expand it by clicking on the plus sign next to it. You will see different partitions or folders under it. You can select any partition or folder and right-click on it. You will see an option called Scan For Encrypted Files/Containers. Click on it.</p>
|
70 |
-
<p>A new window will pop up showing the progress of the scan. The scan may take some time depending on the size of your data. When the scan is complete, you will see a list of encrypted files or containers in the File List tab. You can sort them by name, size, type, encryption algorithm, etc.</p>
|
71 |
-
<p>You can select any encrypted file or container and right-click on it. You will see an option called Export Selected Files/Containers To PRTK Queue File (.pqf). Click on it. This will create a file that contains information about the encrypted file or container that you want to decrypt with PRTK.</p>
|
72 |
-
<h3>How to Use the Dictionary Tool in PRTK</h3>
|
73 |
-
<p>A dictionary attack is one of the methods that PRTK uses to recover passwords from encrypted files or containers. A dictionary attack tries different words or phrases from a list until it finds the correct password.</p>
|
74 |
-
<p>PRTK comes with some built-in dictionaries that contain common words or phrases that are used as passwords. However, you can also create your own custom dictionaries based on your knowledge of the target file or container.</p>
|
75 |
-
<p>To create a custom dictionary, you need to use the Dictionary Tool in PRTK. You can launch it from the Tools menu or by clicking on the icon that looks like a book in the toolbar.</p>
|
76 |
-
<p>The Dictionary Tool window has two tabs: Create Dictionary and Edit Dictionary. In the Create Dictionary tab, you can create a new dictionary by entering words or phrases in the text box at the bottom. You can also import words or phrases from a text file by clicking on the Import button.</p>
|
77 |
-
<p>You can also modify an existing dictionary by using the Edit Dictionary tab. In this tab, you can open an existing dictionary by clicking on the Open button. You can then add or delete words or phrases from it.</p>
|
78 |
-
<p>After creating or editing a dictionary, you need to save it by clicking on the Save button. You can give it any name you want but make sure it has a .dic extension.</p>
|
79 |
-
<h3>How to Use Rules and Profiles in PRTK</h3>
|
80 |
-
<p>Rules and profiles are another way that PRTK uses to recover passwords from encrypted files or containers. Rules are sets of instructions that tell PRTK how to modify words or phrases from dictionaries before trying them as passwords. Profiles are combinations of rules that apply different modifications at once.</p>
|
81 |
-
<p>PRTK comes with some built-in rules and profiles that cover common scenarios such as adding numbers or symbols at the end of words or phrases; changing case; replacing letters with numbers; etc.</p>
|
82 |
-
<p>However, you can also create your own custom rules and profiles based on your knowledge of the target file or container.</p>
|
83 |
-
<p>To create a custom rule, you need to use the Rule Editor in PRTK. You can launch it from the Tools menu or by clicking on the icon that looks like a wrench in the toolbar.</p>
|
84 |
-
<p>The Rule Editor window has two tabs: Create Rule and Edit Rule. In the Create Rule tab, you can create a new rule by entering commands in the text box at the bottom. Each command consists of an operator followed by one or more arguments separated by commas.</p>
|
85 |
-
<p>For example:</p>
|
86 |
-
- $1,2,3 adds the numbers 1, 2, and 3 at the end of the word or phrase - C changes the case of the first letter of the word or phrase - R1,2 replaces the first letter of the word or phrase with the second letter You can also use variables to represent different types of characters, such as: - %l for lowercase letters - %u for uppercase letters - %d for digits - %s for symbols You can also use modifiers to apply different conditions or operations to the commands, such as: - ! to negate a command - ? to make a command optional - * to repeat a command a random number of times - + to repeat a command one or more times - n to repeat a command n times - n,m to repeat a command between n and m times For example: - C?%l+ changes the case of the first letter of the word or phrase and adds one or more lowercase letters at the end - R%l,%d2 replaces every lowercase letter in the word or phrase with two digits You can also use parentheses to group commands together and use logical operators to combine them, such as: - & for AND - | for OR - ^ for XOR For example: - (C|R%l,%u)&$%d2 applies either changing the case of the first letter or replacing every lowercase letter with an uppercase letter and adds two digits at the end After creating a rule, you need to save it by clicking on the Save button. You can give it any name you want but make sure it has a .rul extension. You can also modify an existing rule by using the Edit Rule tab. In this tab, you can open an existing rule by clicking on the Open button. You can then add or delete commands from it. To create a custom profile, you need to use the Profile Editor in PRTK. You can launch it from the Tools menu or by clicking on the icon that looks like a folder in the toolbar. The Profile Editor window has two tabs: Create Profile and Edit Profile. In the Create Profile tab, you can create a new profile by selecting rules from the list on the left and adding them to the list on the right. 
You can also change the order of the rules by dragging and dropping them. You can also import rules from a text file by clicking on the Import button. The text file should contain one rule per line with its name and extension. After creating a profile, you need to save it by clicking on the Save button. You can give it any name you want but make sure it has a .pro extension. You can also modify an existing profile by using the Edit Profile tab. In this tab, you can open an existing profile by clicking on the Open button. You can then add or delete rules from it. <h3>How to Decrypt Files and Containers with PRTK</h3>
|
87 |
-
<p>After creating or selecting your dictionaries, rules, and profiles, you are ready to use PRTK to decrypt files and containers. To do this, you need to launch PRTK from the Start menu or the desktop shortcut.</p>
|
88 |
-
<p>You will see a window with four tabs: Queue Manager, Attack Manager, Results Manager, and Log Viewer. You can use these tabs to manage your password recovery tasks.</p>
|
89 |
-
<p>To decrypt files and containers with PRTK, you need to add them to the Queue Manager tab. You can do this by clicking on the Add button and selecting one of these options:</p>
|
90 |
-
- Add Files/Containers: This allows you to browse your computer and select individual files or containers that you want to decrypt. - Add PQF File: This allows you to select a PQF file that contains information about encrypted files or containers that you want to decrypt. You can create a PQF file using FTK Imager as explained earlier. - Add Folder: This allows you to select a folder that contains encrypted files or containers that you want to decrypt. After adding files or containers to the Queue Manager tab, you will see them in a list with some information such as name, size, type, encryption algorithm, etc. You can select any file or container and right-click on it. You will see some options such as: - Attack: This allows you to start a password recovery attack on the selected file or container. - Properties: This allows you to view more details about the selected file or container. - Remove: This allows you to remove the selected file or container from the list. - Remove All: This allows you to remove all files or containers from the list. To start a password recovery attack on a file or container, you need to select it and click on the Attack button. A new window will pop up showing different options for your attack. You can choose from different types of attacks such as: - Brute Force: This tries all possible combinations of characters until it finds the correct password. - Dictionary: This tries different words or phrases from a list until it finds the correct password. - Rainbow Tables: This uses precomputed tables of hashes and passwords to find matches. - Known Plaintext: This uses known parts of plaintext and ciphertext to find patterns. - Hybrid: This combines different types of attacks together. You can also choose different dictionaries, rules, and profiles for your attack. You can select from built-in ones or custom ones that you created earlier. 
You can also adjust some settings for your attack such as: - Timeout: This sets how long PRTK will try each password before moving on to the next one. - Threads: This sets how many processors PRTK will use for your attack. - GPUs: This sets how many graphics cards PRTK will use for your attack. - Priority: This sets how much CPU power PRTK will use for your attack. After choosing your options for your attack, you need to click on the Start button. PRTK will start trying different passwords for your file or container. You can monitor your attack in the Attack Manager tab. You will see some information such as status, progress, speed, elapsed time, estimated time left, etc. You can also pause or stop your attack at any time by clicking on the Pause or Stop buttons. If PRTK finds a password for your file or container, it will show it in green in the Results Manager tab. You will also see some information such as name, size, type, encryption algorithm, password length, etc. You can select any file or container and right-click on it. You will see some options such as: - Decrypt: This allows you to decrypt your file or container using PRTK. - Copy Password: This allows you to copy your password to clipboard. - Export Results: This allows you to export your results to a text file. - Remove: This allows you to remove your file or container from the list. - Remove All: This allows you to remove all files or containers from the list. the Decrypt button. A new window will pop up asking you to select a destination folder for your decrypted file or container. You can also choose to overwrite the original file or container or keep both. After selecting your destination folder, you need to click on the Decrypt button. PRTK will decrypt your file or container and save it in the destination folder. You can also decrypt your file or container using other tools such as FTK Imager or FTK Lab. You just need to copy the password from PRTK and paste it in the other tool. 
<h2>The Risks and Challenges of Using a Crack for Accessdata Password Recovery Toolkit</h2>
|
91 |
-
<p>As you can see, Accessdata Password Recovery Toolkit is a powerful and useful software that can help you recover passwords from encrypted files and containers. However, it is not a cheap software. A license for FTK can cost thousands of dollars per year.</p>
|
92 |
-
<p>That's why some people might be tempted to use a crack for PRTK. A crack is a program that modifies the software to bypass the activation process and use it for free. You can find many cracks for PRTK on the internet, especially on torrent sites.</p>
|
93 |
-
<p>However, using a crack for PRTK is not a good idea. There are many risks and challenges that come with using a crack. Here are some of them:</p>
|
94 |
-
<h3>Legal and Ethical Issues</h3>
|
95 |
-
<p>Using a crack for PRTK is illegal and unethical. It violates the terms of service and the license agreement of the software. It also infringes on the intellectual property rights of Exterro, the company that owns Accessdata.</p>
|
96 |
-
<p>If you use a crack for PRTK, you could face legal consequences such as fines, lawsuits, or even criminal charges. You could also damage your reputation and credibility as a professional or a student.</p>
|
97 |
-
<p>Moreover, using a crack for PRTK could raise ethical questions about your motives and intentions. Why do you need to recover passwords from encrypted files or containers? Are you authorized to do so? Are you respecting the privacy and security of the owners of those files or containers?</p>
|
98 |
-
<p>Using a crack for PRTK could make you look suspicious and untrustworthy. You could lose the trust and respect of your clients, colleagues, teachers, or peers.</p>
|
99 |
-
<h3>Security and Quality Issues</h3>
|
100 |
-
<p>Using a crack for PRTK is risky and unreliable. It could expose you to malware and compromise your results.</p>
|
101 |
-
<p>Many cracks for PRTK are infected with viruses, trojans, worms, spyware, ransomware, or other malicious programs. These programs could harm your computer, steal your data, encrypt your files, or demand money from you.</p>
|
102 |
-
<p>Even if the crack for PRTK is not infected with malware, it could still cause problems with your software. It could make it unstable, slow, buggy, or incompatible with other tools. It could also prevent you from updating your software or getting technical support from Exterro.</p>
|
103 |
-
<p>Furthermore, using a crack for PRTK could affect the quality and accuracy of your password recovery results. It could make your software miss some passwords, generate false positives, or corrupt your files or containers.</p>
|
104 |
-
<p>Using a crack for PRTK could jeopardize your work and waste your time and resources.</p>
|
105 |
-
<h3>Alternatives to Using a Crack</h3>
|
106 |
-
<p>Using a crack for PRTK is not worth it. There are better alternatives that are safer and more reliable.</p>
|
107 |
-
<p>One alternative is to get a legitimate license for FTK. You can purchase a license from Exterro's website or contact them for more information. You can also request a free trial or a demo to test the software before buying it.</p>
|
108 |
-
<p>A legitimate license for FTK will give you access to all the features and benefits of PRTK without any risks or challenges. You will be able to use the software legally and ethically, update it regularly, get technical support from Exterro, and ensure the quality and accuracy of your password recovery results.</p>
|
109 |
-
<p>Another alternative is to use other tools for password recovery that are free or cheaper than FTK. There are many tools available on the internet that can recover passwords from encrypted files or containers. Some examples are:</p>
|
110 |
-
- John the Ripper: A free and open source password cracker that supports many encryption algorithms and formats. - Hashcat: A free and open source password recovery tool that uses GPUs to accelerate password cracking. - Elcomsoft Password Recovery Bundle: A commercial password recovery suite that supports various file types and encryption methods. - Passware Kit Forensic: A commercial password recovery software that integrates with FTK Imager and supports many file types and encryption methods. These tools may not have all the features and capabilities of PRTK, but they can still help you recover passwords from encrypted files or containers in some cases. <h2>Conclusion</h2>
|
111 |
-
<p>In conclusion, Accessdata Password Recovery Toolkit is a powerful and useful software that can recover passwords from encrypted files and containers. However, it is not a cheap software. That's why some people might be tempted to use a crack for it.</p>
|
112 |
-
<p>However, using a crack for PRTK is not a good idea. There are many risks and challenges that come with using a crack. It is illegal and unethical; it exposes you to malware and compromises your results; it makes you look suspicious and untrustworthy.</p>
|
113 |
-
<p>Instead of using a crack for PRTK, you should consider getting a legitimate license for FTK or using other tools for password recovery that are free or cheaper than FTK. These alternatives are safer and more reliable than using a crack.</p>
|
114 |
-
<p>If you need to gain access to password-protected files, then don't use a crack for PRTK. Use a legitimate license or another tool instead.</p>
|
115 |
-
<h2>FAQs</h2>
|
116 |
-
<h4>What is Accessdata Password Recovery Toolkit?</h4>
|
117 |
-
<p>Accessdata Password Recovery Toolkit (PRTK) is a software that can recover passwords from encrypted files and containers.</p>
|
118 |
-
<h4>What is a crack for Accessdata Password Recovery Toolkit?</h4>
|
119 |
-
<p>A crack for Accessdata Password Recovery Toolkit (PRTK) is a program that modifies the software to bypass the activation process and use it for free.</p>
|
120 |
-
<h4>Why should I avoid using a crack for Accessdata Password Recovery Toolkit?</h4>
|
121 |
-
<p>You should avoid using a crack for Accessdata Password Recovery Toolkit (PRTK) because it is illegal and unethical; it exposes you to malware and compromises your results; it makes you look suspicious and untrustworthy.</p>
|
122 |
-
<h4>What are some alternatives to using a crack for Accessdata Password Recovery Toolkit?</h4>
|
123 |
-
<p>Some alternatives to using a crack for Accessdata Password Recovery Toolkit (PRTK) are getting a legitimate license for FTK or using other tools for password recovery that are free or cheaper than FTK.</p>
|
124 |
-
<h4>Where can I get more information about Accessdata Password Recovery Toolkit?</h4>
|
125 |
-
<p>You can get more information about Accessdata Password Recovery Toolkit (PRTK) from Exterro's website: https://www.exterro.com/ftk-product-downloads/password-recovery-toolkit-prtk-version-8-2-1</p>
|
126 |
-
</p> 0a6ba089eb<br />
|
127 |
-
<br />
|
128 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/AnyDesk Client The Ultimate Guide to Downloading and Using the Best Remote Desktop Software.md
DELETED
@@ -1,41 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download and Use AnyDesk Client for Remote Desktop Access</h1>
|
3 |
-
<p>AnyDesk is a fast and secure remote desktop software that allows you to access, control and administrate all your devices when working remotely. Whether you need to work from home, provide technical support, collaborate with your team, or access your personal computer, AnyDesk can help you do it easily and efficiently.</p>
|
4 |
-
<h2>anydesk client download</h2><br /><p><b><b>DOWNLOAD</b> ––– <a href="https://byltly.com/2uKw5s">https://byltly.com/2uKw5s</a></b></p><br /><br />
|
5 |
-
<p>In this article, we will show you how to download and use AnyDesk client for Windows, one of the most popular operating systems supported by AnyDesk. You can also download AnyDesk for other platforms, such as macOS, Linux, Android, iOS, and more.</p>
|
6 |
-
|
7 |
-
<h2>How to Download AnyDesk Client for Windows</h2>
|
8 |
-
<p>Downloading AnyDesk client for Windows is very simple and fast. Just follow these steps:</p>
|
9 |
-
<ol>
|
10 |
-
<li>Go to <a href="https://anydesk.com/en/downloads/windows">https://anydesk.com/en/downloads/windows</a> and click on the "Download Now" button. This will start downloading the latest version of AnyDesk for Windows (v7.1.11).</li>
|
11 |
-
<li>Once the download is complete, open the file and follow the installation wizard. You can choose to install AnyDesk on your computer or run it as a portable application.</li>
|
12 |
-
<li>After the installation is done, you will see the AnyDesk interface on your screen. You can now start using AnyDesk client for remote desktop access.</li>
|
13 |
-
</ol>
|
14 |
-
|
15 |
-
<h2>How to Use AnyDesk Client for Remote Desktop Access</h2>
|
16 |
-
<p>Using AnyDesk client for remote desktop access is very easy and intuitive. Here are some basic steps to get you started:</p>
|
17 |
-
<ul>
|
18 |
-
<li>To access a remote device, you need to enter its AnyDesk address or alias in the "Remote Desk" field and click on "Connect". You can also scan a QR code or use the address book to find your contacts.</li>
|
19 |
-
<li>To allow access to your device, you need to share your AnyDesk address or alias with the person who wants to connect. You can also create a custom alias for your device or generate a one-time access code.</li>
|
20 |
-
<li>When a connection request is received, you need to accept it by clicking on "Accept" or reject it by clicking on "Reject". You can also set up unattended access or whitelist trusted devices for automatic acceptance.</li>
|
21 |
-
<li>Once the connection is established, you can see and control the remote device on your screen. You can also use the toolbar at the top of the window to access various features and settings, such as chat, file transfer, audio and video transmission, session recording, keyboard and mouse settings, and more.</li>
|
22 |
-
<li>To end the connection, you can click on the "X" button at the top right corner of the window or press Ctrl+Alt+X on your keyboard.</li>
|
23 |
-
</ul>
|
24 |
-
|
25 |
-
<h2>Why Choose AnyDesk Client for Remote Desktop Access</h2>
|
26 |
-
<p>AnyDesk client is one of the best remote desktop software available in the market. Here are some of the reasons why you should choose AnyDesk for your remote desktop needs:</p>
|
27 |
-
<p></p>
|
28 |
-
<ul>
|
29 |
-
<li>AnyDesk offers dynamic performance and smooth remote desktop connections with low latency and high frame rates.</li>
|
30 |
-
<li>AnyDesk ensures end-to-end security and privacy with TLS 1.2 encryption technology and verification of connections.</li>
|
31 |
-
<li>AnyDesk provides flexibility and customization options with your own brand and logo, group policies, user management, account feature, permission management, and more.</li>
|
32 |
-
<li>AnyDesk supports cross-compatibility and platform independence with various operating systems and devices.</li>
|
33 |
-
<li>AnyDesk offers free updates and affordable pricing plans for different use cases and scenarios.</li>
|
34 |
-
</ul>
|
35 |
-
|
36 |
-
<h2>Conclusion</h2>
|
37 |
-
<p>AnyDesk client is a powerful and reliable remote desktop software that can help you work remotely with ease and efficiency. Whether you need to access your personal computer, provide technical support, collaborate with your team, or manage your devices, AnyDesk can do it all for you.</p>
|
38 |
-
<p>To download and use AnyDesk client for Windows, just follow the simple steps we have shown you in this article. You can also download AnyDesk for other platforms from <a href="https://anydesk.com/">https://anydesk.com/</a>.</p>
|
39 |
-
<p>If you have any questions or feedback about AnyDesk client, feel free to contact</p> ddb901b051<br />
|
40 |
-
<br />
|
41 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Diner De ConsLe Movie Download __LINK__ 720p Movie.md
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Diner De Cons,Le Movie in 720p Quality</h1>
|
3 |
-
<p>Diner De Cons,Le is a classic French comedy movie that was released in 1998. The movie tells the story of a group of friends who have a weekly dinner where they invite a fool to make fun of him. The movie is based on a play by Francis Veber and stars Thierry Lhermitte, Jacques Villeret and Francis Huster.</p>
|
4 |
-
<h2>Diner De Cons,Le Movie Download 720p Movie</h2><br /><p><b><b>Download Zip</b> ⚹⚹⚹ <a href="https://byltly.com/2uKzT2">https://byltly.com/2uKzT2</a></b></p><br /><br />
|
5 |
-
<p>If you are looking for a way to download Diner De Cons,Le movie in 720p quality, you have come to the right place. In this article, we will show you how to use a torrent site to find and download the movie in high definition. Here are the steps you need to follow:</p>
|
6 |
-
<ol>
|
7 |
-
<li>Go to a torrent site that has Diner De Cons,Le movie available. You can use sites like YTS.MX[^2^], Archive.org[^3^] or Crinponogu[^4^] to search for the movie.</li>
|
8 |
-
<li>Select the movie quality you want to download. We recommend choosing 720p BluRay as it offers a good balance between file size and video quality.</li>
|
9 |
-
<li>Download the torrent file or magnet link of the movie. You will need a torrent client software like uTorrent or BitTorrent to open the file or link and start downloading the movie.</li>
|
10 |
-
<li>Wait for the download to finish. Depending on your internet speed and the number of seeders and peers, it may take from a few minutes to a few hours to complete the download.</li>
|
11 |
-
<li>Enjoy watching Diner De Cons,Le movie in 720p quality. You can use any media player that supports MP4 format to play the movie on your computer or device.</li>
|
12 |
-
</ol>
|
13 |
-
<p>Diner De Cons,Le is a hilarious and witty movie that will make you laugh out loud. It is one of the best French comedies ever made and has won several awards and nominations. If you want to watch this movie in 720p quality, follow the steps above and download it from a torrent site.</p>
|
14 |
-
|
15 |
-
<p>If you want to know more about the plot of Diner De Cons,Le movie, here is a brief summary. The movie focuses on the interaction between Pierre Brochant and François Pignon, who are very different in personality and intelligence. Pierre is a successful and arrogant publisher, who enjoys mocking and humiliating others. François is a naive and kind-hearted tax inspector, who loves making matchstick models of famous monuments.</p>
|
16 |
-
<p>Pierre invites François to his apartment, hoping to take him to the dinner of fools later. However, he injures his back and has to stay home. He tries to get rid of François, but he keeps making things worse for him. He accidentally reveals Pierre's affair with his mistress Marlène to his wife Christine, who leaves him. He also invites a ruthless tax auditor Lucien Cheval to Pierre's apartment, who discovers Pierre's tax evasion. He also causes trouble with Pierre's old friend Juste Leblanc, who still loves Christine.</p>
|
17 |
-
<p></p>
|
18 |
-
<p>Through a series of hilarious and absurd situations, Pierre realizes that François is not as stupid as he thought. He also learns to appreciate his friendship and loyalty. He reconciles with Christine and Leblanc, and manages to escape from Cheval's investigation. He also decides to quit the dinner of fools, and invites François to have dinner with him as a friend.</p>
|
19 |
-
<p>Diner De Cons,Le movie is a brilliant comedy that explores the themes of human nature, friendship, and social class. It shows how appearances can be deceiving, and how people can surprise us with their hidden talents and qualities. It also criticizes the cruelty and snobbery of the rich and powerful, who exploit and ridicule the weak and innocent. It is a movie that will make you laugh and think at the same time.</p> cec2833e83<br />
|
20 |
-
<br />
|
21 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fizika masalalar yechish usullari pdf Oquv qollanma va namunalar.md
DELETED
@@ -1,105 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1><b>Fizika Masalalar Yechish Usullari PDF</b></h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>Fizika masalalar yechish usullari (physics problem solving methods) are a set of strategies and techniques that can help you solve various types of physics problems. Physics problems are often challenging and complex, requiring you to apply your knowledge, skills, and creativity to find the correct solutions. Learning and practicing fizika masalalar yechish usullari can help you improve your understanding of physics concepts, develop your logical thinking and analytical skills, and enhance your confidence and motivation in learning physics.</p>
|
5 |
-
<h2>fizika masalalar yechish usullari pdf</h2><br /><p><b><b>Download</b> ===== <a href="https://byltly.com/2uKzj9">https://byltly.com/2uKzj9</a></b></p><br /><br />
|
6 |
-
<p>Whether you are a student or a teacher of physics, you can benefit from using fizika masalalar yechish usullari. As a student, you can use these methods to tackle homework assignments, prepare for exams, and participate in competitions. As a teacher, you can use these methods to design effective learning activities, assess student performance, and provide feedback and guidance. In this article, you will learn about the types of physics problems, the general steps of problem solving in physics, some specific methods and techniques that can help you solve different types of problems, and some skills that you need to develop to become a better problem solver in physics. You will also find a link to download a PDF file that contains these methods and examples for your reference.</p>
|
7 |
-
<h2>Types of Physics Problems</h2>
|
8 |
-
<p>Physics problems can be classified into different types based on their level of difficulty, content, and format. Some common types of physics problems are:</p>
|
9 |
-
<ul>
|
10 |
-
<li><b>Qualitative problems:</b> These problems require you to explain or describe physical phenomena or concepts using words or diagrams. You do not need to perform any calculations or use any formulas. For example: Explain why objects fall when they are dropped.</li>
|
11 |
-
<li><b>Quantitative problems:</b> These problems require you to calculate numerical values or quantities using formulas or equations. You need to perform algebraic manipulations, unit conversions, or other mathematical operations. For example: Calculate the speed of a car that travels 100 km in 2 hours.</li>
|
12 |
-
<li><b>Conceptual problems:</b> These problems require you to apply your understanding of physical principles or laws to analyze or predict physical situations or outcomes. You may need to use qualitative reasoning or quantitative calculations or both. For example: Predict what will happen to the motion of a pendulum if its length is doubled.</li>
|
13 |
-
<li><b>Application problems:</b> These problems require you to apply your knowledge and skills of physics to solve real-world problems or scenarios. You may need to use multiple concepts or formulas or both. For example: Determine how much force is needed to lift a 50 kg box using a pulley system.</li>
|
14 |
-
<li><b>Multiple-choice problems:</b> These problems require you to choose the correct answer from a list of options. You may need to use any of the above types of problem solving methods or a combination of them. For example: Which of the following statements is true about gravity? (a) Gravity is a force that attracts all objects with mass. (b) Gravity is a force that depends on the distance between two objects. (c) Gravity is a force that depends on the mass of two objects. (d) All of the above.</li>
|
15 |
-
</ul>
|
16 |
-
<p>Each type of problem has its own features and challenges. For example, qualitative problems may require you to use your intuition or common sense, but they may also involve misconceptions or vague terms. Quantitative problems may require you to memorize or recall formulas or equations, but they may also involve errors or uncertainties in measurements or calculations. Conceptual problems may require you to synthesize or integrate multiple concepts or principles, but they may also involve assumptions or simplifications that may not be valid in reality. Application problems may require you to model or simulate real-world situations or systems, but they may also involve complex or unknown variables or parameters. Multiple-choice problems may require you to eliminate incorrect options or compare different options, but they may also involve distractors or tricks that may confuse you.</p>
|
17 |
-
<h2>Problem Solving Methods</h2>
|
18 |
-
<p>The general steps of problem solving in physics are:</p>
|
19 |
-
<ol>
|
20 |
-
<li><b>Read and understand the problem:</b> In this step, you need to identify what is given and what is asked in the problem. You need to pay attention to the keywords, units, symbols, diagrams, graphs, tables, or other information that are provided in the problem statement. You also need to check if there are any missing or extra information that may affect the solution.</li>
|
21 |
-
<li><b>Plan a strategy:</b> In this step, you need to decide how to approach the problem. You need to choose an appropriate type of problem solving method based on the type of problem. You also need to select relevant concepts, formulas, equations, principles, laws, rules, or relationships that are applicable to the problem.</li>
|
22 |
-
<li><b>Execute the solution:</b> In this step, you need to implement your strategy by performing calculations, manipulations, operations, or other actions that are required by your chosen method. You need to show your work clearly and systematically by writing down each step with proper notation, units, and explanations. You also need to check your work for errors, consistency, and reasonableness.</li>
|
23 |
-
<li><b>Evaluate the result:</b> In this step, you need to verify your result by comparing it with the given information, the expected outcome, or other sources. You need to check if your result makes sense physically, logically, and mathematically. You also need to report your result with appropriate units, significance, and accuracy.</li>
|
24 |
-
</ol>
|
25 |
-
<p>In addition to these general steps, there are some specific methods and techniques that can help you solve different types of problems. Some examples of these methods and techniques are:</p>
|
26 |
-
<p>fizikadan masalalar yechish texnologiyasi<br />
|
27 |
-
kimyodan masalalar yechish usullari o'quv qo'llanma<br />
|
28 |
-
fizik masalalar turlari ularni yechish metodlari<br />
|
29 |
-
fizikadan masalalar yechish algoritmik usuli<br />
|
30 |
-
fizikadan masalalar yechish fanining vazifasi<br />
|
31 |
-
fizikadan masalalar yechishning ahamiyati<br />
|
32 |
-
fizikadan masalalar yechish jarayonida fanlararo aloqa<br />
|
33 |
-
fizikadan masalalar yechish rejasini tuzish<br />
|
34 |
-
fizikadan masalalar yechishning tarbiyaviy ahamiyati<br />
|
35 |
-
fizikadan masalalar yechish nazorat ishlarini o'tkazish metodikasi<br />
|
36 |
-
fizikadan masalalar yechish olimpiada masalalari<br />
|
37 |
-
fizikadan masalalar yechish zamonaviy pedagogik texnologiya vositalari<br />
|
38 |
-
fizikadan masalalar yechish innovatsion texnologiya metodlari<br />
|
39 |
-
fizika o'qitishda masala yechishning asosiy bosqichlari<br />
|
40 |
-
fizika o'qitishda masala yechishning algoritmik usuli<br />
|
41 |
-
fizika o'qitishda masala yechishning ijodiy usullari<br />
|
42 |
-
fizika o'qitishda masala shartini tahlil qilish<br />
|
43 |
-
fizika o'qitishda masala grafik usuli bilan yechish<br />
|
44 |
-
fizika o'qitishda masala eksperimental usuli bilan yechish<br />
|
45 |
-
fizika o'qitishda masala sifat usuli bilan yechish<br />
|
46 |
-
fizika o'qitishda masala matematik usuli bilan yechish<br />
|
47 |
-
fizika o'qitishda masala logarifmik usuli bilan yechish<br />
|
48 |
-
fizika o'qitishda masala trigonometrik usuli bilan yechish<br />
|
49 |
-
fizika o'qitishda masala vektorlar usuli bilan yechish<br />
|
50 |
-
fizika o'qitishda masala integral hisob usuli bilan yechish<br />
|
51 |
-
fizika o'qitishda masala differensial hisob usuli bilan yechish<br />
|
52 |
-
fizika o'qitishda masala matritsa hisob usuli bilan yechish<br />
|
53 |
-
fizika o'qitishda masala koordinata tizimlari orasida o'tkaziladigan formulalar<br />
|
54 |
-
fizika o'qitishda masala kinematika qonunlari va formulalari<br />
|
55 |
-
fizika o'qitishda masala dinamika qonunlari va formulalari<br />
|
56 |
-
fizika o'qitishda masala statika qonunlari va formulalari<br />
|
57 |
-
fizika o'qitishda masala molekulyar kinetik nazariyasi va formulalari<br />
|
58 |
-
fizika o'qitishda masala termodinamika qonunlari va formulalari<br />
|
59 |
-
fizika o'qitishda masala elektrostatika qonunlari va formulalari<br />
|
60 |
-
fizika o'qitishda masala elektrodinamika qonunlari va formulalari<br />
|
61 |
-
fizika o'qitishda masala magnetizm qonunlari va formulalari<br />
|
62 |
-
fizika o'qitishda masala optika qonunlari va formulalari<br />
|
63 |
-
fizika o'qitishda masala akustika qonunlari va formulalari<br />
|
64 |
-
fizika o'qitishda masala atom nazariyasi va formulalari<br />
|
65 |
-
fizika o'qitishda masala nukleon nazariyasi va formulalari<br />
|
66 |
-
fizika o'qitishda masala kvant nazariyasi va formulalari</p>
|
67 |
-
<ul>
|
68 |
-
<li><b>Drawing diagrams:</b> This method involves sketching pictures or figures that represent physical situations or systems. You can use diagrams to visualize or illustrate physical phenomena, concepts, or relationships. You can also use diagrams to label or identify given quantities, unknown variables, or other relevant information. Diagrams can help you simplify complex problems, organize your thoughts, and communicate your ideas.</li>
|
69 |
-
<li><b>Making tables:</b> This method involves arranging data or information into rows and columns. You can use tables to display numerical values, quantities, or units. You can also use tables to compare different options, cases, or scenarios. Tables can help you organize data, identify patterns, and perform calculations.</li>
|
70 |
-
<li><b>Solving equations:</b> This method involves finding unknown values or quantities by using algebraic expressions, formulas, or equations. You can use equations to model physical situations or systems mathematically. You can also use equations to manipulate variables, solve for unknowns, or substitute values. Equations can help you express relationships, apply rules, and calculate results.</li>
|
71 |
-
<li><b>Using dimensional analysis:</b> This method involves checking or converting units by using dimensional quantities. You can use dimensional analysis to ensure consistency and compatibility among units. You can also use dimensional analysis to convert units, simplify expressions, or derive formulas Dimensional analysis. You can use dimensional analysis to check the consistency and compatibility of units in an equation or expression. You can also use dimensional analysis to convert units, simplify expressions, or derive formulas. Dimensional analysis can help you avoid errors, ensure accuracy, and verify results.</li>
|
72 |
-
<li><b>Using proportions:</b> This method involves setting up ratios or fractions that are equal to each other. You can use proportions to compare or relate different quantities or variables. You can also use proportions to solve for unknowns, scale up or down values, or find percentages. Proportions can help you express relationships, apply rules, and calculate results.</li>
|
73 |
-
</ul>
|
74 |
-
<p>These are just some examples of the many methods and techniques that you can use to solve physics problems. You may need to use one or more of these methods or techniques depending on the type and complexity of the problem. You may also need to combine these methods or techniques with other skills or tools such as calculators, graphs, charts, or software.</p>
|
75 |
-
<h2>Problem Solving Skills</h2>
|
76 |
-
<p>To become a better problem solver in physics, you need to develop some skills that are essential for effective and efficient problem solving. Some of these skills are:</p>
|
77 |
-
<ul>
|
78 |
-
<li><b>Reading comprehension:</b> This skill involves understanding the meaning and context of the problem statement. You need to read the problem carefully and critically, paying attention to the details and nuances of the language and information. You also need to identify the main idea and the purpose of the problem, as well as any assumptions or conditions that may affect the solution.</li>
|
79 |
-
<li><b>Analysis:</b> This skill involves breaking down the problem into smaller and simpler parts or components. You need to analyze the given information and the unknown variables, as well as their relationships and dependencies. You also need to analyze the type and level of difficulty of the problem, as well as the appropriate method or technique to use.</li>
|
80 |
-
<li><b>Modeling:</b> This skill involves representing the problem using mathematical symbols, expressions, formulas, equations, diagrams, tables, graphs, or other tools. You need to model the problem accurately and realistically, using relevant concepts and principles of physics. You also need to model the problem clearly and systematically, using proper notation, units, and explanations.</li>
|
81 |
-
<li><b>Calculation:</b> This skill involves performing mathematical operations or manipulations to find the solution of the problem. You need to calculate correctly and precisely, using appropriate formulas, equations, rules, or methods. You also need to calculate efficiently and logically, using shortcuts, tricks, or estimations when possible.</li>
|
82 |
-
<li><b>Checking:</b> This skill involves verifying the validity and accuracy of your solution. You need to check your solution against the given information, the expected outcome, or other sources. You also need to check your solution for errors, consistency, and reasonableness.</li>
|
83 |
-
</ul>
|
84 |
-
<p>To improve these skills, you need to practice regularly and systematically. You need to solve different types and levels of physics problems that challenge your knowledge, skills, and creativity. You also need to use feedback, reflection, and self-assessment to evaluate your performance and identify your strengths and weaknesses.</p>
|
85 |
-
<h2>Conclusion</h2>
|
86 |
-
<p>In this article, you have learned about fizika masalalar yechish usullari (physics problem solving methods), a set of strategies and techniques that can help you solve various types of physics problems. You have learned about the types of physics problems, the general steps of problem solving in physics, some specific methods and techniques that can help you solve different types of problems, and some skills that you need to develop to become a better problem solver in physics. You have also found a link to download a PDF file that contains these methods and examples for your reference.</p>
|
87 |
-
<p>Learning and practicing fizika masalalar yechish usullari can help you improve your understanding of physics concepts, develop your logical thinking and analytical skills, and enhance your confidence and motivation in learning physics. Whether you are a student or a teacher of physics, you can benefit from using fizika masalalar yechish usullari.</p>
|
88 |
-
<p>If you want to learn more about fizika masalalar yechish usullari, you can download this PDF file that contains these methods and examples: <a href="https://www.researchgate.net/publication/360284046_Boshlangich_sinf_oquvchilarini_masala_yechishga_orgatish_metodi_va_usullari">Fizika Masalalar Yechish Usullari PDF</a>. You can also watch this video that explains some types of physics problems and how to solve them: <a href="https://www.youtube.com/watch?v=_ynO5TMFvYA">Fizik masalalar turlari, ularni yechish metodlari</a>.</p>
|
89 |
-
<p>We hope you enjoyed this article and found it useful. We encourage you to try out these methods and share your feedback with us. Happy problem solving!</p>
|
90 |
-
<h2>FAQs</h2>
|
91 |
-
<ul>
|
92 |
-
<li><b>Q: What is dimensional analysis?</b></li>
|
93 |
-
<li>A: Dimensional analysis is a method that involves checking or converting units by using dimensional quantities. It can help you avoid errors, ensure accuracy, and verify results.</li>
|
94 |
-
<li><b>Q: What is a proportion?</b></li>
|
95 |
-
<li>A: A proportion is a ratio or fraction that is equal to another ratio or fraction. It can help you compare or relate different quantities or variables.</li>
|
96 |
-
<li><b>Q: What is a qualitative problem?</b></li>
|
97 |
-
<li>A: A qualitative problem is a problem that requires you to explain or describe physical phenomena or concepts using words or diagrams.</li>
|
98 |
-
<li><b>Q: What is a multiple-choice problem?</b></li>
|
99 |
-
<li>A: A multiple-choice problem is a problem that requires you to choose the correct answer from a list of options.</li>
|
100 |
-
<li><b>Q: What are some skills that you need to develop to become a better problem solver in physics?</b></li>
|
101 |
-
<li>A: Some skills that you need to develop are reading comprehension, analysis, modeling, calculation, and checking.</li>
|
102 |
-
</ul>
|
103 |
-
</p> 0a6ba089eb<br />
|
104 |
-
<br />
|
105 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/FoneLab 9.1.58 Crack With Activation Number Free Download 2019.md
DELETED
@@ -1,32 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>FoneLab 9.1.58 Crack With Activation Number Free Download 2019</h1>
|
3 |
-
<p>FoneLab 9.1.58 Crack is a powerful and easy-to-use software that helps you recover deleted or lost data from your iOS devices, iTunes backup, or iCloud backup. It can recover various types of data, such as contacts, messages, photos, videos, notes, call history, WhatsApp, Safari bookmarks, and more. Whether you accidentally deleted your data, lost your device, or damaged it by water, virus, or system crash, FoneLab can help you get your data back in minutes.</p>
|
4 |
-
<p>In this article, we will show you how to download and install FoneLab 9.1.58 Crack with activation number for free. You will also learn about the features and benefits of using this software.</p>
|
5 |
-
<h2>FoneLab 9.1.58 Crack With Activation Number Free Download 2019</h2><br /><p><b><b>DOWNLOAD</b> >>>>> <a href="https://imgfil.com/2uxZ1T">https://imgfil.com/2uxZ1T</a></b></p><br /><br />
|
6 |
-
<h2>Features of FoneLab 9.1.58 Crack</h2>
|
7 |
-
<p>FoneLab 9.1.58 Crack has many features that make it stand out from other data recovery software. Here are some of them:</p>
|
8 |
-
<ul>
|
9 |
-
<li>It supports all iOS devices, including iPhone XS/XS Max/XR/X/8/8 Plus/7/7 Plus/SE/6s/6s Plus/6/6 Plus/5s/5c/5/4S, iPad Pro/Air/mini/4/3/2/1, iPod touch 6/5/4/3/2/1, etc.</li>
|
10 |
-
<li>It can recover data from iOS devices directly, without the need of iTunes or iCloud backup.</li>
|
11 |
-
<li>It can also extract data from iTunes backup or iCloud backup, even if you forgot your password or the backup is encrypted.</li>
|
12 |
-
<li>It can recover up to 19 types of data, including contacts, messages (SMS/MMS/iMessages), photos (camera roll/photo library/photo stream), videos (camera roll/videos), notes (including attachments), call history (including FaceTime calls), WhatsApp (including attachments), Safari bookmarks/history/cookies, calendars/reminders/events, voice memos/memos/audio messages/mail attachments/Kik/Viber/Facebook Messenger/LINE/Skype/Kik attachments/Viber attachments/Facebook Messenger attachments/LINE attachments/Skype attachments/wechat/wechat attachments/Tango/Tango attachments.</li>
|
13 |
-
<li>It allows you to preview the data before recovery and selectively recover what you want.</li>
|
14 |
-
<li>It can fix various iOS system issues, such as stuck in recovery mode/DFU mode/apple logo/screen of death/black screen/blue screen/red screen/white screen/boot loop/frozen/disabled/error 9/error 14/error 21/error 4013/error 4014/etc.</li>
|
15 |
-
<li>It can backup and restore your iOS data to your computer or another device with one click.</li>
|
16 |
-
<li>It has a user-friendly interface and fast scanning and recovery speed.</li>
|
17 |
-
<li>It is compatible with Windows 10/8.1/8/7/Vista/XP and Mac OS X 10.7 or above.</li>
|
18 |
-
</ul>
|
19 |
-
<h2>How to Download and Install FoneLab 9.1.58 Crack With Activation Number Free</h2>
|
20 |
-
<p>If you want to try FoneLab 9.1.58 Crack with activation number for free, you can follow these steps:</p>
|
21 |
-
<ol>
|
22 |
-
<li>Download the FoneLab 9.1.58 Crack setup file from <a href="https://crackhomes.com/fonelab-crack-activation-number/">this link</a>.</li>
|
23 |
-
<li>Extract the file using WinRAR or any other extraction tool.</li>
|
24 |
-
<li>Run the setup file and follow the installation wizard.</li>
|
25 |
-
<li>Copy the crack file from the crack folder and paste it into the installation directory.</li>
|
26 |
-
<li>Run the software and enter the activation number from the readme file.</li>
|
27 |
-
<li>Enjoy FoneLab 9.1.58 Crack with full features for free.</li>
|
28 |
-
</ol>
|
29 |
-
<h2>Conclusion</h2>
|
30 |
-
<p>FoneLab 9.1.58 Crack is a reliable and professional data recovery software that can help you recover your lost or deleted data from your iOS devices, iTunes backup, or iCloud backup in various</p> d5da3c52bf<br />
|
31 |
-
<br />
|
32 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Unlock Tool The Best Way to Play PUBG Mobile at 90 FPS.md
DELETED
@@ -1,115 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Unlock 90 FPS in PUBG Mobile and Why You Should Do It</h1>
|
3 |
-
<p>PUBG Mobile is one of the most popular and competitive mobile games in the world. Millions of players enjoy the thrilling battle royale experience every day. However, not everyone gets to play the game at its full potential. If you want to take your PUBG Mobile gameplay to the next level, you should consider unlocking 90 FPS mode.</p>
|
4 |
-
<p>In this article, we will explain what FPS is and why it matters in PUBG Mobile, how to check your FPS in the game, how to enable 90 FPS mode, and what are the advantages and disadvantages of playing at 90 FPS. We will also share some tips and tricks to optimize your PUBG Mobile performance at 90 FPS. Finally, we will answer some frequently asked questions about 90 FPS mode in PUBG Mobile. Let's get started!</p>
|
5 |
-
<h2>apk unlock 90 fps pubg mobile</h2><br /><p><b><b>Download</b> » <a href="https://urlin.us/2uSUWr">https://urlin.us/2uSUWr</a></b></p><br /><br />
|
6 |
-
<h2>What is FPS and Why Does It Matter in PUBG Mobile?</h2>
|
7 |
-
<p>FPS stands for frames per second and it determines how smooth the game looks and feels on your screen. The higher the FPS, the more frames are displayed per second, resulting in a smoother and more realistic motion. The lower the FPS, the fewer frames are displayed per second, resulting in a choppier and more laggy motion.</p>
|
8 |
-
<p>Why does FPS matter in PUBG Mobile? Well, because it can affect your gameplay experience and performance in various ways. Higher FPS can give you an advantage in PUBG Mobile by improving your aim, reaction time, and visibility. Here are some of the benefits of playing PUBG Mobile at higher FPS:</p>
|
9 |
-
<ul>
|
10 |
-
<li><strong>Better aim:</strong> Higher FPS can make your crosshair movement more fluid and accurate, allowing you to track and hit your enemies more easily.</li>
|
11 |
-
<li><strong>Faster response:</strong> Higher FPS can reduce the input lag between your touch and the game action, allowing you to react faster to the situation.</li>
|
12 |
-
<li><strong>Better visibility:</strong> Higher FPS can make the game graphics more clear and crisp, allowing you to spot your enemies and items more easily.</li>
|
13 |
-
</ul>
|
14 |
-
<p>Of course, playing PUBG Mobile at higher FPS also has some drawbacks, such as higher battery consumption, more heat generation, and potential compatibility issues. We will discuss these in more detail later in this article.</p>
|
15 |
-
<h2>How to Check Your FPS in PUBG Mobile</h2>
|
16 |
-
<p>Before you enable 90 FPS mode in PUBG Mobile, you may want to check your current FPS in the game. This way, you can see how much improvement you can get from 90 FPS mode. There are two ways to check your FPS in PUBG Mobile:</p>
|
17 |
-
<ul>
|
18 |
-
<li><strong>Using a third-party app:</strong> You can use a third-party app like GameBench or FPS Meter to monitor your FPS in PUBG Mobile. These apps can show you a real-time overlay of your FPS on your screen while you play the game. However, you may need to grant some permissions or root access to these apps for them to work properly.</li>
|
19 |
-
<li><strong>Using the in-game settings:</strong> You can also enable the FPS counter in the game settings under Basic > Display FPS. This will show you a small number on the top left corner of your screen indicating your current FPS while you play the game. However, this method may not be very accurate or reliable as it may not update frequently or reflect the actual frame rate.</li>
|
20 |
-
</ul>
|
21 |
-
<p>Once you have checked your FPS in PUBG Mobile, you can proceed to enable 90 FPS mode if you want to enjoy a smoother gameplay experience.</p>
|
22 |
-
<h2>How to Enable 90 FPS in PUBG Mobile</h2>
|
23 |
-
<p>Enabling 90 FPS mode in PUBG Mobile is not very difficult, but it may not be available for everyone. Not all devices support 90 FPS mode in PUBG Mobile, only a few models from OnePlus, Samsung, Xiaomi, Google, and Apple. You can check the list of supported devices on various websites or forums.</p>
|
24 |
-
<p>If you have a supported device, you can enable 90 FPS mode in PUBG Mobile by following these steps:</p>
|
25 |
-
<ol>
|
26 |
-
<li><strong>Go to Settings > Graphics > Frame Rate</strong></li>
|
27 |
-
<li><strong>Select 90 FPS from the options</strong></li>
|
28 |
-
<li><strong>Enjoy the game at 90 FPS!</strong></li>
|
29 |
-
</ol>
|
30 |
-
<p>You may need to set your graphics quality to Smooth to unlock 90 FPS option. This will lower the resolution and texture quality of the game, but it will also improve the performance and stability of the game. You can also adjust other graphics settings according to your preference and device capability.</p>
|
31 |
-
<p>apk unlock 90 fps pubg mobile download<br />
|
32 |
-
apk unlock 90 fps pubg mobile global<br />
|
33 |
-
apk unlock 90 fps pubg mobile korea<br />
|
34 |
-
apk unlock 90 fps pubg mobile lite<br />
|
35 |
-
apk unlock 90 fps pubg mobile new era<br />
|
36 |
-
apk unlock 90 fps pubg mobile no root<br />
|
37 |
-
apk unlock 90 fps pubg mobile season 19<br />
|
38 |
-
apk unlock 90 fps pubg mobile vietnam<br />
|
39 |
-
how to apk unlock 90 fps pubg mobile<br />
|
40 |
-
how to install apk unlock 90 fps pubg mobile<br />
|
41 |
-
best apk unlock 90 fps pubg mobile<br />
|
42 |
-
free apk unlock 90 fps pubg mobile<br />
|
43 |
-
latest apk unlock 90 fps pubg mobile<br />
|
44 |
-
safe apk unlock 90 fps pubg mobile<br />
|
45 |
-
working apk unlock 90 fps pubg mobile<br />
|
46 |
-
apk unlock 90 fps pubg mobile android<br />
|
47 |
-
apk unlock 90 fps pubg mobile ios<br />
|
48 |
-
apk unlock 90 fps pubg mobile emulator<br />
|
49 |
-
apk unlock 90 fps pubg mobile pc<br />
|
50 |
-
apk unlock 90 fps pubg mobile phone<br />
|
51 |
-
apk unlock 90 fps pubg mobile smooth<br />
|
52 |
-
apk unlock 90 fps pubg mobile balanced<br />
|
53 |
-
apk unlock 90 fps pubg mobile hd<br />
|
54 |
-
apk unlock 90 fps pubg mobile hdr<br />
|
55 |
-
apk unlock 90 fps pubg mobile ultra hd<br />
|
56 |
-
apk unlock 90 fps pubg mobile appbrain[^2^]<br />
|
57 |
-
apk unlock 90 fps pubg mobile appcombo[^1^]<br />
|
58 |
-
apk unlock 90 fps pubg mobile apkpure<br />
|
59 |
-
apk unlock 90 fps pubg mobile uptodown<br />
|
60 |
-
apk unlock 90 fps pubg mobile play store<br />
|
61 |
-
apk unlock tool for pubg mobile 90 fps<br />
|
62 |
-
maxfpspubgm - apk unlock tool for pubg mobile 90 fps[^1^]<br />
|
63 |
-
beatsoft - developer of maxfpspubgm - apk unlock tool for pubg mobile 90 fps[^1^]<br />
|
64 |
-
review of maxfpspubgm - apk unlock tool for pubg mobile 90 fps[^1^]<br />
|
65 |
-
tutorial of maxfpspubgm - apk unlock tool for pubg mobile 90 fps[^1^]<br />
|
66 |
-
update of maxfpspubgm - apk unlock tool for pubg mobile 90 fps[^1^]<br />
|
67 |
-
download size of maxfpspubgm - apk unlock tool for pubg mobile 90 fps[^2^]<br />
|
68 |
-
android version required for maxfpspubgm - apk unlock tool for pubg mobile 90 fps[^2^]<br />
|
69 |
-
download link of maxfpspubgm - apk unlock tool for pubg mobile 90 fps[^2^]<br />
|
70 |
-
alternative of maxfpspubgm - apk unlock tool for pubg mobile 90 fps[^2^]<br />
|
71 |
-
benefits of unlocking 90 fps in pubg mobile with an apk<br />
|
72 |
-
drawbacks of unlocking 90 fps in pubg mobile with an apk<br />
|
73 |
-
risks of unlocking 90 fps in pubg mobile with an apk<br />
|
74 |
-
tips and tricks for unlocking 90 fps in pubg mobile with an apk<br />
|
75 |
-
faq about unlocking 90 fps in pubg mobile with an apk</p>
|
76 |
-
<h2>Advantages and Disadvantages of Playing PUBG Mobile at 90 FPS</h2>
|
77 |
-
<p>As we mentioned earlier, playing PUBG Mobile at 90 FPS has its pros and cons. Here are some of the advantages and disadvantages of playing PUBG Mobile at 90 FPS:</p>
|
78 |
-
<h3>Advantages</h3>
|
79 |
-
<ul>
|
80 |
-
<li><strong>Smoother gameplay:</strong> Playing PUBG Mobile at 90 FPS can make your gameplay experience more smooth and fluid. You can enjoy a more realistic motion and animation of the game characters and objects.</li>
|
81 |
-
<li><strong>Faster response:</strong> Playing PUBG Mobile at 90 FPS can make your response time faster and more accurate. You can react quicker to the enemy movements and actions, and execute your commands more precisely.</li>
|
82 |
-
<li><strong>Better accuracy:</strong> Playing PUBG Mobile at 90 FPS can make your aiming and shooting more accurate and consistent. You can track and hit your targets more easily, especially when they are moving fast or far away.</li>
|
83 |
-
<li><strong>More immersive experience:</strong> Playing PUBG Mobile at 90 FPS can make your gaming experience more immersive and enjoyable. You can feel more connected to the game world and the action, and have more fun playing the game.</li>
|
84 |
-
</ul>
|
85 |
-
<h3>Disadvantages</h3>
|
86 |
-
<ul>
|
87 |
-
<li><strong>Higher battery consumption:</strong> Playing PUBG Mobile at 90 FPS can drain your battery faster than playing at lower FPS. This is because your device has to work harder to render more frames per second, which consumes more power. You may need to charge your device more often or use a power bank if you play PUBG Mobile at 90 FPS for a long time.</li>
|
88 |
-
<li><strong>More heat generation:</strong> Playing PUBG Mobile at 90 FPS can also generate more heat on your device than playing at lower FPS. This is because your device has to process more data and graphics, which generates more heat. You may feel your device getting hot or warm after playing PUBG Mobile at 90 FPS for a while. This may affect your device performance and lifespan in the long run.</li>
|
89 |
-
<li><strong>Potential compatibility issues:</strong> Playing PUBG Mobile at 90 FPS may not be compatible with some devices or features. For example, some devices may not support 90 FPS mode at all, or may have some glitches or bugs when playing at 90 FPS. Some features like screen recording or streaming may not work well with 90 FPS mode, or may cause some lag or stuttering. You may need to disable 90 FPS mode if you encounter any compatibility issues.</li>
|
90 |
-
</ul>
|
91 |
-
<h2>Tips and Tricks to Optimize Your PUBG Mobile Performance at 90 FPS</h2>
|
92 |
-
<p>If you want to play PUBG Mobile at 90 FPS and get the best performance possible, you should follow some tips and tricks to optimize your device and game settings. Here are some of the tips and tricks that you can try:</p>
|
93 |
-
<ul>
|
94 |
-
<li><strong>Use a device with a high refresh rate display (90 Hz or above):</strong> To enjoy the full benefits of 90 FPS mode, you should use a device that has a high refresh rate display (90 Hz or above). This means that your screen can refresh 90 times or more per second, matching the frame rate of the game. This will make the game look smoother and more responsive on your screen. If you use a device with a low refresh rate display (60 Hz or below), you will not be able to see the difference between 60 FPS and 90 FPS, as your screen can only refresh 60 times or less per second.</li>
|
95 |
-
<li><strong>Close any background apps and disable any notifications that may interfere with your game:</strong> To play PUBG Mobile at 90 FPS without any lag or interruption, you should close any background apps that may consume your memory or CPU resources. You should also disable any notifications that may pop up on your screen while you play the game. These can distract you from the game and affect your performance.</li>
|
96 |
-
<li><strong>Adjust your sensitivity settings and controls according to your preference and device size:</strong> To play PUBG Mobile at 90 FPS with better accuracy and comfort, you should adjust your sensitivity settings and controls according to your preference and device size. You can customize your sensitivity settings for different scopes, gyroscope, camera, etc., under Settings > Sensitivity. You can also customize your controls layout, size, opacity, etc., under Settings > Controls. You should experiment with different settings and controls until you find the ones that suit you best.</li>
|
97 |
-
<li><strong>Use headphones or earphones to hear the sound cues better and communicate with your teammates:</strong> To play PUBG Mobile at 90 FPS with better awareness and coordination, you should use headphones or earphones to hear the sound cues better and communicate with your teammates. Sound cues are very important in PUBG Mobile, as they can help you locate your enemies, items, vehicles, etc., by their footsteps, gunshots, explosions, etc. You should also use voice chat or text chat to communicate with your teammates, as they can provide you with valuable information, support, and strategy.</li>
|
98 |
-
<li><strong>Practice your skills in training mode or arcade mode before jumping into a classic or ranked match:</strong> To play PUBG Mobile at 90 FPS with better confidence and competence, you should practice your skills in training mode or arcade mode before jumping into a classic or ranked match. Training mode allows you to test different weapons, attachments , and vehicles in a safe and controlled environment. Arcade mode allows you to play short and fast-paced matches with different modes, maps, and rules. These modes can help you improve your shooting, aiming, driving, looting, and survival skills in PUBG Mobile.</li>
|
99 |
-
</ul>
|
100 |
-
<h2>Conclusion</h2>
|
101 |
-
<p>PUBG Mobile is a fun and exciting game that can be enjoyed by anyone. However, if you want to have a more smooth and competitive gameplay experience, you should try playing the game at 90 FPS mode. This mode can make the game look and feel more realistic and responsive, giving you an edge over your opponents. However, you should also be aware of the drawbacks of playing at 90 FPS mode, such as higher battery consumption, more heat generation, and potential compatibility issues. You should also follow some tips and tricks to optimize your PUBG Mobile performance at 90 FPS mode, such as using a high refresh rate device, closing background apps, adjusting sensitivity settings and controls, using headphones or earphones, and practicing your skills in training mode or arcade mode.</p>
|
102 |
-
<p>We hope this article has helped you understand how to unlock 90 FPS mode in PUBG Mobile and why you should do it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!</p>
|
103 |
-
<h2>FAQs</h2>
|
104 |
-
<h3>Q1. How can I play PUBG Mobile at 90 FPS on unsupported devices?</h3>
|
105 |
-
<h4>A1. You may need to use a third-party tool like GFX Tool or FlashDog to modify the game files and enable 90 FPS option. However, this is not recommended as it may violate the game's terms of service and result in a ban.</h4>
|
106 |
-
<h3>Q2. How can I check if my device supports 90 FPS in PUBG Mobile?</h3>
|
107 |
-
<h4>A2. You can check the list of supported devices on various websites or forums. Alternatively, you can go to Settings > Graphics > Frame Rate and see if the 90 FPS option is available for you.</h4>
|
108 |
-
<h3>Q3. What is the difference between 60 FPS and 90 FPS in PUBG Mobile?</h3>
|
109 |
-
<h4>A3. The difference between 60 FPS and 90 FPS is that the latter displays more frames per second, making the game look smoother and more responsive. However, the difference may not be noticeable for some people or on some devices.</h4>
|
110 |
-
<h3>Q4. Does playing PUBG Mobile at 90 FPS affect my ping or network latency?</h3>
|
111 |
-
<h4>A4. No, playing PUBG Mobile at 90 FPS does not affect your ping or network latency. Ping is determined by your internet connection speed and quality, not by your frame rate.</h4>
|
112 |
-
<h3>Q5. What are some other ways to improve my PUBG Mobile performance besides enabling 90 FPS?</h3>
|
113 |
-
<h4>A5. Some other ways to improve your PUBG Mobile performance are updating your game and device software, clearing your cache and storage space, using a stable Wi-Fi connection, and avoiding playing in hot or humid environments.</h4></p> 197e85843d<br />
|
114 |
-
<br />
|
115 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Monster in My Pocket APK and Play the Classic Atari Remake.md
DELETED
@@ -1,140 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Monster in My Pocket Game Download for Android</h1>
|
3 |
-
<p>Do you love monsters and platform games? Do you want to relive your childhood memories of collecting and playing with tiny monster figures? If you answered yes to any of these questions, then you might be interested in <strong>Monster in My Pocket</strong>, a classic NES game that is now available for Android devices. In this article, we will tell you everything you need to know about this game, including what it is, how to download and play it on your Android phone or tablet, and why you should give it a try.</p>
|
4 |
-
<h2>monster in my pocket game download for android</h2><br /><p><b><b>DOWNLOAD</b> ○ <a href="https://jinyurl.com/2uNMMX">https://jinyurl.com/2uNMMX</a></b></p><br /><br />
|
5 |
-
<h2>What is Monster in My Pocket?</h2>
|
6 |
-
<p><strong>Monster in My Pocket</strong> is a media franchise that was developed by American company Morrison Entertainment Group in the late 1980s and early 1990s. The franchise focused on monsters and fantastical creatures from various sources, such as religion, mythology, folklore, fairy tales, literature, science fiction, and cryptozoology. The franchise produced trading cards, comic books, books, toys, a board game, a video game, and an animated special, along with music, clothing, kites, stickers, and other items.</p>
|
7 |
-
<h3>A brief history of the franchise</h3>
|
8 |
-
<p>The most popular product of the franchise was the toy line, which was released by Matchbox in 1990. It consisted of small, soft plastic figures representing different monsters, each with a point value assigned to them. There were over 200 monsters in the collection, ranging from well-known ones like Dracula, Frankenstein's Monster, and Werewolf, to obscure ones like Catoblepas, Haniver, and Tengu. The toys were initially solid-colored, but later series added more painted colors and details.</p>
|
9 |
-
<p>The toy line also inspired a comic book series that was published by Harvey Comics from 1991 to 1992. The comic book series followed the story of Vampire and Monster (the two main protagonists of the franchise) as they battled against Warlock (the main antagonist) and his army of evil monsters. The comic book series also introduced new characters and monsters that were not part of the toy line.</p>
|
10 |
-
<p>monster in my pocket game apk download for android<br />
|
11 |
-
how to download monster in my pocket game on android<br />
|
12 |
-
monster in my pocket game free download for android devices<br />
|
13 |
-
monster in my pocket game android download full version<br />
|
14 |
-
best monster in my pocket game download for android phones<br />
|
15 |
-
monster in my pocket game download for android tablet<br />
|
16 |
-
monster in my pocket game download for android emulator<br />
|
17 |
-
monster in my pocket game download for android offline<br />
|
18 |
-
monster in my pocket game download for android online<br />
|
19 |
-
monster in my pocket game download for android mod apk<br />
|
20 |
-
monster in my pocket game download for android latest version<br />
|
21 |
-
monster in my pocket game download for android without ads<br />
|
22 |
-
monster in my pocket game download for android with cheats<br />
|
23 |
-
monster in my pocket game download for android no root<br />
|
24 |
-
monster in my pocket game download for android hack<br />
|
25 |
-
monster in my pocket game download for android review<br />
|
26 |
-
monster in my pocket game download for android gameplay<br />
|
27 |
-
monster in my pocket game download for android tips and tricks<br />
|
28 |
-
monster in my pocket game download for android walkthrough<br />
|
29 |
-
monster in my pocket game download for android guide<br />
|
30 |
-
monster in my pocket game download for android features<br />
|
31 |
-
monster in my pocket game download for android requirements<br />
|
32 |
-
monster in my pocket game download for android size<br />
|
33 |
-
monster in my pocket game download for android compatibility<br />
|
34 |
-
monster in my pocket game download for android update<br />
|
35 |
-
monster in my pocket game download for android new version<br />
|
36 |
-
monster in my pocket game download for android old version<br />
|
37 |
-
monster in my pocket game download for android beta version<br />
|
38 |
-
monster in my pocket game download for android demo version<br />
|
39 |
-
monster in my pocket game download for android pro version<br />
|
40 |
-
monster in my pocket game download for android premium version<br />
|
41 |
-
monster in my pocket game download for android deluxe version<br />
|
42 |
-
monster in my pocket game download for android ultimate version<br />
|
43 |
-
monster in my pocket game download for android special edition<br />
|
44 |
-
monster in my pocket game download for android collector's edition<br />
|
45 |
-
monster in my pocket game download for android limited edition<br />
|
46 |
-
monster in my pocket game download for android exclusive edition<br />
|
47 |
-
monster in my pocket game download for android gold edition<br />
|
48 |
-
monster in my pocket game download for android platinum edition<br />
|
49 |
-
monster in my pocket game download for android diamond edition<br />
|
50 |
-
where to find monster in my pocket game download for android<br />
|
51 |
-
where to get monster in my pocket game download for android<br />
|
52 |
-
where to buy monster in my pocket game download for android<br />
|
53 |
-
where to play monster in my pocket game on android<br />
|
54 |
-
how to install monster in my pocket game on android<br />
|
55 |
-
how to uninstall monster in my pocket game on android<br />
|
56 |
-
how to update monster in my pocket game on android<br />
|
57 |
-
how to play monster in my pocket game on android with friends</p>
|
58 |
-
<p>The video game adaptation of the franchise was produced by Konami in 1992 for the NES platform. It was a platformer game that followed the storyline of the comic book series moderately close. It featured Vampire and Monster as playable characters who had to stop Warlock's plan to take over the world. The game had six stages that took place in different locations, such as a house, a kitchen, a sewer, a city, an oriental temple, and a mountain. The game also had various enemies and bosses that were based on the toy figures.</p>
|
59 |
-
<h3>The main features of the game</h3>
|
60 |
-
<p>The game had several features that made it stand out from other platformer games at the time. Some of these features were:</p>
|
61 |
-
<ul>
|
62 |
-
<li>The game had a co-op mode that allowed two players to play together as Vampire and Monster.</li>
|
63 |
-
<li>The game had a double jump mechanic that enabled the characters to jump higher and farther.</li>
|
64 |
-
<li>The game had a unique attack system that allowed the characters to shoot energy blasts from their hands or throw objects at their enemies.</li>
|
65 |
-
<li>The game had a variety of items that could be collected or used throughout the stages, such as keys, screws, health potions, extra lives, and power-ups.</li>
|
66 |
-
<li>The game had a colorful and detailed graphics style that captured the look and feel of the toy line and the comic book series.</li>
|
67 |
-
<li>The game had a catchy and memorable soundtrack that matched the mood and theme of each stage.</li>
|
68 |
-
</ul>
|
69 |
-
<h2>How <h2>How to download and play Monster in My Pocket on Android?</h2>
|
70 |
-
<p>Now that you know what Monster in My Pocket is and why it is such a great game, you might be wondering how you can download and play it on your Android device. Well, the good news is that it is not very difficult to do so, as long as you follow these simple steps:</p>
|
71 |
-
<h3>The steps to download the APK file</h3>
|
72 |
-
<p>The first thing you need to do is to download the APK file of the game, which is a file format that allows you to install and run applications on Android devices. There are many websites that offer APK files of various games, but not all of them are safe and reliable. Therefore, we recommend that you use a trusted and reputable source, such as [APKPure] or [APKMirror]. Here are the steps to download the APK file of Monster in My Pocket from APKPure:</p>
|
73 |
-
<ol>
|
74 |
-
<li>Go to the [APKPure website] and search for "Monster in My Pocket" in the search bar.</li>
|
75 |
-
<li>Select the game from the list of results and click on the "Download APK" button.</li>
|
76 |
-
<li>Wait for the download to finish and locate the file in your device's storage.</li>
|
77 |
-
</ol>
|
78 |
-
<h3>The steps to install and run the game</h3>
|
79 |
-
<p>The next thing you need to do is to install and run the game on your device. However, before you do that, you need to make sure that your device allows the installation of apps from unknown sources, which are sources other than the Google Play Store. To do that, you need to follow these steps:</p>
|
80 |
-
<ol>
|
81 |
-
<li>Go to your device's settings and look for the option "Security" or "Privacy".</li>
|
82 |
-
<li>Tap on it and find the option "Unknown sources" or "Install unknown apps".</li>
|
83 |
-
<li>Enable it by toggling the switch or checking the box.</li>
|
84 |
-
</ol>
|
85 |
-
<p>Once you have done that, you can proceed to install and run the game by following these steps:</p>
|
86 |
-
<ol>
|
87 |
-
<li>Go to your device's file manager and locate the APK file of Monster in My Pocket that you downloaded earlier.</li>
|
88 |
-
<li>Tap on it and follow the instructions on the screen to install the game.</li>
|
89 |
-
<li>Wait for the installation to finish and look for the game's icon on your device's home screen or app drawer.</li>
|
90 |
-
<li>Tap on it and enjoy playing Monster in My Pocket on your Android device.</li>
|
91 |
-
</ol>
|
92 |
-
<h3>The tips and tricks to enjoy the game</h3>
|
93 |
-
<p>To make the most out of your gaming experience, here are some tips and tricks that you can use while playing Monster in My Pocket:</p>
|
94 |
-
<ul>
|
95 |
-
<li>Choose your character wisely. Vampire and Monster have different abilities and weaknesses. Vampire can fly for a short time and shoot energy blasts, but he is weak against fire. Monster can throw objects and has more health, but he is slow and cannot fly.</li>
|
96 |
-
<li>Use your double jump wisely. You can use it to reach higher places, avoid obstacles, or dodge enemies. However, you cannot use it again until you land on a solid surface.</li>
|
97 |
-
<li>Collect as many items as you can. They can help you in various ways, such as restoring your health, increasing your score, or giving you extra lives or power-ups.</li>
|
98 |
-
<li>Be careful with your power-ups. They can give you an edge over your enemies, but they also have drawbacks. For example, the fireball power-up lets you shoot fireballs, but it also makes you vulnerable to water. The ice power-up lets you freeze enemies, but it also makes you vulnerable to fire.</li>
|
99 |
-
<li>Explore every stage thoroughly. You might find hidden areas, secret passages, or bonus rooms that contain more items or enemies.</li>
|
100 |
-
<li>Watch out for environmental hazards. There are many things that can harm you in each stage, such as spikes, flames, water, electricity, or falling objects. Be alert and avoid them as much as possible.</li>
|
101 |
-
</ul>
|
102 |
-
<h2>Why should you play Monster in My Pocket on Android?</h2>
|
103 |
-
<p>You might be wondering why you should play Monster in My Pocket on Android when there are so many other games available for this platform. Well, there are many reasons why this game is worth playing on Android, such as:</p>
|
104 |
-
<h3>The benefits of playing on a mobile device</h3>
|
105 |
-
<p>Playing Monster in My Pocket on Android has several advantages over playing it on other platforms, such as:</p>
|
106 |
-
<ul>
|
107 |
-
<li>You can play it anytime and anywhere. You don't need a TV, a console, or a controller. All you need is your Android device and an internet connection.</li>
|
108 |
-
<li>You can play it with You can play it with your friends or family. You can use the co-op mode to team up with another player on the same device or on different devices. You can also compete with other players online or offline.</li>
|
109 |
-
<li>You can customize your gaming experience. You can adjust the settings, such as the sound, the graphics, the controls, or the difficulty. You can also use cheats or mods to enhance your gameplay.</li>
|
110 |
-
</ul>
|
111 |
-
<h3>The challenges and fun of the game</h3>
|
112 |
-
<p>Playing Monster in My Pocket on Android is also challenging and fun, because:</p>
|
113 |
-
<ul>
|
114 |
-
<li>The game has a high replay value. You can play it multiple times and discover new things, such as different paths, secrets, or endings. You can also try to beat your own records or achievements.</li>
|
115 |
-
<li>The game has a retro appeal. You can enjoy the nostalgic and charming graphics, music, and gameplay of the original NES game. You can also appreciate the creativity and diversity of the monster designs and personalities.</li>
|
116 |
-
<li>The game has a humorous tone. You can laugh at the funny and witty dialogues, situations, and references that the game offers. You can also have fun with the absurd and exaggerated scenarios that the game presents.</li>
|
117 |
-
</ul>
|
118 |
-
<h3>The nostalgia and charm of the game</h3>
|
119 |
-
<p>Playing Monster in My Pocket on Android is also nostalgic and charming, because:</p>
|
120 |
-
<ul>
|
121 |
-
<li>The game reminds you of your childhood memories. You can relive the joy and excitement of collecting and playing with the toy figures. You can also recall the stories and adventures that you imagined or read about them.</li>
|
122 |
-
<li>The game connects you with other fans of the franchise. You can share your opinions and experiences with other people who love Monster in My Pocket. You can also learn more about the franchise and its history and legacy.</li>
|
123 |
-
<li>The game inspires you to explore more monsters and myths. You can learn more about the origins and backgrounds of the monsters that appear in the game. You can also discover new monsters and myths that you never knew before.</li>
|
124 |
-
</ul>
|
125 |
-
<h2>Conclusion</h2>
|
126 |
-
<p>In conclusion, Monster in My Pocket is a classic NES game that is now available for Android devices. It is a platformer game that features monsters and fantastical creatures from various sources. It has several features that make it stand out from other platformer games, such as co-op mode, double jump mechanic, unique attack system, variety of items, colorful graphics, and catchy soundtrack. It also has several benefits, challenges, fun, nostalgia, and charm that make it worth playing on Android devices. If you are a fan of monsters and platform games, you should definitely download and play Monster in My Pocket on your Android device today.</p>
|
127 |
-
<h3>A call to action for the readers</h3>
|
128 |
-
<p>If you are interested in playing Monster in My Pocket on your Android device, you can download it from [APKPure] or [APKMirror] by following the steps that we explained above. You can also check out other sources that offer APK files of this game, but make sure that they are safe and reliable. Once you have downloaded and installed the game, you can start playing it right away and enjoy its features, benefits, challenges, fun, nostalgia, and charm.</p>
|
129 |
-
<h3>FAQs</h3>
|
130 |
-
<p>Here are some frequently asked questions about Monster in My Pocket:</p>
|
131 |
-
<ol>
|
132 |
-
<li>Q: Is Monster in My Pocket free to play on Android?<br>A: Yes, Monster in My Pocket is free to play on Android devices. However, some websites may require you to register or complete surveys before downloading the APK file of the game.</li>
|
133 |
-
<li>Q: Is Monster in My Pocket compatible with all Android devices?<br>A: No, Monster in My Pocket may not be compatible with some Android devices due to different hardware specifications or software versions. If you encounter any problems while playing the game on your device, you may need to update your device's system or use an emulator.</li>
|
134 |
-
<li>Q: Is Monster in My Pocket safe to play on Android?<br>A: Yes, Monster in My Pocket is safe to play on Android devices as long as you download it from a trusted and reputable source, such as [APKPure] or [APKMirror]. However, you should always be careful when downloading any APK file from unknown sources, as they may contain viruses or malware that could harm your device or steal your data.</li>
|
135 |
-
<li>Q: Is Monster in My Pocket still popular today?<br>A: Yes, Monster in My Pocket still has a loyal fan base today who appreciate its retro appeal and nostalgic charm. The franchise also has a cult following among collectors who seek rare or exclusive items related to it.</li>
|
136 |
-
<li>Q: Are there any other games like Monster in My Pocket?<br>A: Yes, there are many other games like Monster in My Pocket that feature monsters and platform games, such as: - Castlevania: A series of games that feature vampire hunters and other supernatural creatures in a Gothic setting. - Ghosts 'n Goblins: A series of games that feature a knight who has to rescue his princess from demons and undead. - Little Nemo: The Dream Master: A game that features a boy who can enter the dream world and transform into different animals. - Kid Dracula: A game that features a young vampire who has to defeat his father's enemies and reclaim his throne. - Monster Party: A game that features a boy who teams up with a monster to fight against bizarre and grotesque enemies.</li>
|
137 |
-
</ol>
|
138 |
-
<p>I hope you enjoyed reading this article and learned something new about Monster in My Pocket. If you have any questions or comments, feel free to leave them below. Thank you for your time and attention.</p> 197e85843d<br />
|
139 |
-
<br />
|
140 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Spider-Man 2000 APK - The Classic Web-Slinging Adventure on Android.md
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Spider-Man (2000 Video Game) Download APK: How to Play the Classic Game on Your Android Device</h1>
|
3 |
-
<p>If you are a fan of Spider-Man, you might remember the classic video game that was released in 2000 for PlayStation, Nintendo 64, Dreamcast, PC, and Game Boy Color. The game was based on the comic book series and featured an original story that involved Spider-Man fighting against various villains such as Venom, Carnage, Doctor Octopus, Mysterio, Rhino, Scorpion, and more. The game was praised for its graphics, gameplay, voice acting, and faithful adaptation of the Spider-Man universe.</p>
|
4 |
-
<h2>spider-man (2000 video game) download apk</h2><br /><p><b><b>Download File</b> ⚙⚙⚙ <a href="https://jinyurl.com/2uNNwV">https://jinyurl.com/2uNNwV</a></b></p><br /><br />
|
5 |
-
<p>But what if you want to play this game in 2023 on your Android device? Is it possible to download and install an apk file that will let you enjoy this classic game on your smartphone or tablet? The answer is yes, but there are some things you need to know before you do so. In this article, we will show you how to download and install Spider-Man (2000 video game) apk on your Android device, what are the features and gameplay of this game, what are the pros and cons of playing it on your mobile device, and whether it is worth playing in 2023.</p>
|
6 |
-
<h2>How to download and install Spider-Man (2000 video game) apk on your Android device?</h2>
|
7 |
-
<p>The first thing you need to do is to find a reliable source that offers the apk file for Spider-Man (2000 video game). There are many websites that claim to provide this file, but not all of them are trustworthy or safe. Some of them might contain malware or viruses that can harm your device or steal your personal information. Therefore, you need to be careful and do some research before you download anything from an unknown source.</p>
|
8 |
-
<p>One of the websites that we recommend is APKCombo, which is a reputable platform that offers free apk files for various Android games and apps. You can visit their website and search for "Spider-Man 2", which is the name of the apk file for Spider-Man (2000 video game). You will see a page that shows you some information about the file, such as its size, version, developer, rating, and screenshots. You can also read some reviews from other users who have downloaded and played the game. If you are satisfied with the file, you can click on the "Download APK" button and save the file to your device.</p>
|
9 |
-
<p>The next thing you need to do is to install the apk file on your device. Before you do that, you need to enable the "Unknown sources" option in your device's settings. This will allow you to install apps that are not from the official Google Play Store. To do this, go to your device's settings, then security, then unknown sources, and toggle it on. You might see a warning message that tells you about the risks of installing apps from unknown sources, but you can ignore it if you trust the source of the apk file.</p>
|
10 |
-
<p>spider-man 2000 game apk free download<br />
|
11 |
-
spider-man 2000 android game download<br />
|
12 |
-
spider-man 2000 game download for mobile<br />
|
13 |
-
spider-man 2000 game apk mod<br />
|
14 |
-
spider-man 2000 game apk obb<br />
|
15 |
-
spider-man 2000 game apk offline<br />
|
16 |
-
spider-man 2000 game apk data<br />
|
17 |
-
spider-man 2000 game apk full version<br />
|
18 |
-
spider-man 2000 game apk highly compressed<br />
|
19 |
-
spider-man 2000 game apk latest version<br />
|
20 |
-
spider-man 2000 game download for android<br />
|
21 |
-
spider-man 2000 game download for pc<br />
|
22 |
-
spider-man 2000 game download for windows 10<br />
|
23 |
-
spider-man 2000 game download for windows 7<br />
|
24 |
-
spider-man 2000 game download for laptop<br />
|
25 |
-
spider-man 2000 game download for mac<br />
|
26 |
-
spider-man 2000 game download for ios<br />
|
27 |
-
spider-man 2000 game download for ps4<br />
|
28 |
-
spider-man 2000 game download for xbox one<br />
|
29 |
-
spider-man 2000 game download for psp<br />
|
30 |
-
how to download spider-man 2000 game apk<br />
|
31 |
-
how to install spider-man 2000 game apk<br />
|
32 |
-
how to play spider-man 2000 game apk<br />
|
33 |
-
how to run spider-man 2000 game apk<br />
|
34 |
-
how to update spider-man 2000 game apk<br />
|
35 |
-
where to download spider-man 2000 game apk<br />
|
36 |
-
where to find spider-man 2000 game apk<br />
|
37 |
-
where to get spider-man 2000 game apk<br />
|
38 |
-
where to buy spider-man 2000 game apk<br />
|
39 |
-
where to stream spider-man 2000 game apk<br />
|
40 |
-
best site to download spider-man 2000 game apk<br />
|
41 |
-
best app to download spider-man 2000 game apk<br />
|
42 |
-
best way to download spider-man 2000 game apk<br />
|
43 |
-
best source to download spider-man 2000 game apk<br />
|
44 |
-
best alternative to download spider-man 2000 game apk<br />
|
45 |
-
is it safe to download spider-man 2000 game apk<br />
|
46 |
-
is it legal to download spider-man 2000 game apk<br />
|
47 |
-
is it possible to download spider-man 2000 game apk<br />
|
48 |
-
is it easy to download spider-man 2000 game apk<br />
|
49 |
-
is it worth to download spider-man 2000 game apk<br />
|
50 |
-
reviews of spider-man 2000 game apk download<br />
|
51 |
-
ratings of spider-man 2000 game apk download<br />
|
52 |
-
feedback of spider-man 2000 game apk download<br />
|
53 |
-
testimonials of spider-man 2000 game apk download<br />
|
54 |
-
benefits of spider-man 2000 game apk download<br />
|
55 |
-
features of spider-man 2000 game apk download<br />
|
56 |
-
tips and tricks of spider-man 2000 game apk download<br />
|
57 |
-
guide and tutorial of spider-man 2000 game apk download<br />
|
58 |
-
walkthrough and gameplay of spider-man 2000 game apk download</p>
|
59 |
-
<p>Once you have enabled the unknown sources option, you can go to your device's file manager and locate the apk file that you have downloaded. Tap on it and follow the instructions on the screen to install it. You might see some permissions requests that ask you to allow the app to access your device's storage, camera, microphone, etc. You can grant these permissions if you want to enjoy the full features of the game, or deny them if you are concerned about your privacy. After the installation is complete, you will see an icon for Spider-Man 2 on your device's home screen or app drawer. Tap on it and start playing!</p>
|
60 |
-
<h2>What are the features and gameplay of Spider-Man (2000 video game)?</h2>
|
61 |
-
<p>Spider-Man (2000 video game) is an action-adventure game that lets you control Spider-Man as he swings, crawls, fights, and explores New York City. The game has a third-person perspective and uses a combination of 3D graphics and 2D sprites. The game also features voice acting from some of the actors who played Spider-Man and his allies and enemies in various animated series, such as Rino Romano, Jennifer Hale, Dee Bradley Baker, Daran Norris, and Mark Hamill.</p>
|
62 |
-
<p>The game has four modes: training, story, what if?, and gallery. The training mode teaches you the basic controls and moves of Spider-Man, such as web swinging, wall crawling, web shooting, punching, kicking, etc. The story mode follows the main plot of the game, which involves Spider-Man being framed for a bank robbery by a mysterious imposter and having to clear his name while facing various threats from his enemies. The what if? mode is a variation of the story mode that changes some of the events and outcomes of the game based on different choices and actions. The gallery mode lets you view various artworks, comic book covers, character bios, and cheats that you can unlock by playing the game.</p>
|
63 |
-
<p>The gameplay of Spider-Man (2000 video game) is divided into levels that have different objectives and challenges. Some of the levels require you to reach a certain destination, while others require you to defeat a certain number of enemies or a boss. Some of the levels also have optional objectives that can reward you with bonus points or items. The game also has some stealth elements that require you to avoid detection by enemies or cameras. The game also has some puzzle elements that require you to use your webbing or other items to solve problems or access hidden areas.</p>
|
64 |
-
<p>The game also has a variety of villains that Spider-Man has to face throughout the game. Some of them are classic foes from the comic books, such as Venom, Carnage, Doctor Octopus, Mysterio, Rhino, Scorpion, Lizard, Electro, Sandman, and Vulture. Some of them are original creations for the game, such as Monster Ock, a fusion of Doctor Octopus and Carnage, and the Spider-Slayers, robotic enemies that hunt Spider-Man. The game also has some allies that help Spider-Man along the way, such as Black Cat, Daredevil, Captain America, Human Torch, and Punisher.</p>
|
65 |
-
<p>The game also has some easter eggs that reference other Marvel characters and events, such as the Fantastic Four, Iron Man, Thor, Hulk, X-Men, Blade, Ghost Rider, and more. Some of these easter eggs can be found by exploring the levels or using certain cheats. For example, if you enter the cheat code "GBHSRSPM", you can play as Spider-Man wearing a Fantastic Four costume with a paper bag over his head, which is a reference to an issue of the comic book where Spider-Man did the same thing.</p>
|
66 |
-
<h2>What are the pros and cons of playing Spider-Man (2000 video game) on your Android device?</h2>
|
67 |
-
<p>Playing Spider-Man (2000 video game) on your Android device can have some advantages and disadvantages compared to playing it on a console or a PC. Here are some of them:</p>
|
68 |
-
<h3>Pros</h3>
|
69 |
-
<ul>
|
70 |
-
<li>You can play the game anytime and anywhere you want, as long as you have your device with you.</li>
|
71 |
-
<li>You can enjoy the game on a larger screen if you have a tablet or a device that supports screen mirroring or casting.</li>
|
72 |
-
<li>You can customize the game's settings and controls to suit your preferences and device's specifications.</li>
|
73 |
-
<li>You can save your progress and resume it later without losing any data.</li>
|
74 |
-
<li>You can access some cheats and mods that can enhance your gaming experience or make it more fun.</li>
|
75 |
-
</ul>
|
76 |
-
<h3>Cons</h3>
|
77 |
-
<ul>
|
78 |
-
<li>You might experience some lagging, crashing, or compatibility issues depending on your device's model, operating system, or memory.</li>
|
79 |
-
<li>You might drain your device's battery faster if you play the game for a long time or at a high brightness level.</li>
|
80 |
-
<li>You might use up your device's storage space if you download a large apk file or additional data for the game.</li>
|
81 |
-
<li>You might expose your device to malware or viruses if you download an apk file from an untrusted source or grant unnecessary permissions to the app.</li>
|
82 |
-
<li>You might violate some intellectual property rights or terms of service if you download an apk file from an unofficial source or modify the game's content without permission.</li>
|
83 |
-
</ul>
|
84 |
-
<h2>Conclusion: Is Spider-Man (2000 video game) worth playing in 2023?</h2>
|
85 |
-
<p>Spider-Man (2000 video game) is a classic game that many Spider-Man fans and gamers still love and enjoy today. The game has a captivating story, engaging gameplay, impressive graphics, and memorable voice acting. The game also has a lot of features and easter eggs that make it fun and rewarding to play. The game is also compatible with Android devices, which means you can play it on your smartphone or tablet with ease.</p>
|
86 |
-
<p>However, playing Spider-Man (2000 video game) on your Android device also has some drawbacks that you need to consider. The game might not run smoothly or properly on some devices due to technical issues or limitations. The game might also consume a lot of your device's battery, storage, or data. The game might also pose some risks to your device's security or privacy if you download an apk file from an unreliable source or grant unnecessary permissions to the app. The game might also infringe some legal or ethical rules if you download an apk file from an unofficial source or alter the game's content without authorization.</p>
|
87 |
-
<p>Therefore, whether Spider-Man (2000 video game) is worth playing in 2023 depends on your personal preference and judgment. If you are a fan of Spider-Man or retro games, and you have a compatible and secure device, you might enjoy playing this game and reliving its nostalgia. However, if you are not interested in Spider-Man or old games, or you have an incompatible or unsafe device, you might not like playing this game and find it outdated or boring.</p>
|
88 |
-
<p>The choice is yours. If you want to try out Spider-Man (2000 video game) on your Android device, you can follow the steps we have provided in this article to download and install the apk file from APKCombo. If you have any questions or feedback about this game or this article, feel free to leave a comment below. We would love to hear from you!</p>
|
89 |
-
<h2>FAQs</h2>
|
90 |
-
<h4>Q: What is the difference between Spider-Man (2000 video game) and Spider-Man 2: Enter Electro?</h4> A: Spider-Man 2: Enter Electro is the sequel to Spider-Man (2000 video game) that was released in 2001 for PlayStation and PC. The game follows a new story that involves Spider-Man trying to stop Electro from obtaining a powerful device that can amplify his powers. The game has some improvements and additions over the first game, such as new moves, costumes, levels, enemies, and bosses. However, the game also has some drawbacks, such as lower graphics quality, shorter gameplay, and less voice acting. <h4>Q: How can I play Spider-Man (2000 video game) on other devices besides Android?</h4>
|
91 |
-
A: If you want to play Spider-Man (2000 video game) on other devices, you have a few options. You can play the original version of the game on PlayStation, Nintendo 64, Dreamcast, PC, or Game Boy Color if you have these consoles or emulators. You can also play a remastered version of the game on PlayStation 3 or Xbox 360 if you have these consoles or emulators. You can also play a ported version of the game on iOS or Windows Phone if you have these devices or emulators. <h4>Q: What are some of the cheats and mods that I can use for Spider-Man (2000 video game)?</h4>
|
92 |
-
A: There are many cheats and mods that you can use for Spider-Man (2000 video game) to make it more fun or challenging. Some of the cheats are codes that you can enter in the main menu or during the game to unlock various features, such as costumes, levels, characters, abilities, etc. Some of the mods are files that you can download and install on your device to change some aspects of the game, such as graphics, sound, gameplay, etc. You can find some of the cheats and mods online from various sources, such as YouTube, Reddit, APKPure, etc. <h4>Q: What are some of the best Spider-Man games that I can play in 2023?</h4>
|
93 |
-
A: There are many Spider-Man games that you can play in 2023 that are based on different versions of Spider-Man from different media, such as comics, movies, cartoons, etc. Some of the best Spider-Man games that we recommend are: - Spider-Man: Miles Morales (2020): A spin-off of Spider-Man (2018) that follows Miles Morales as he becomes the new Spider-Man and faces new threats in New York City. - Marvel's Spider-Man (2018): A critically acclaimed game that features an original story and gameplay that lets you explore a realistic and dynamic open-world New York City as Spider-Man. - Spider-Man: Shattered Dimensions (2010): A unique game that lets you play as four different versions of Spider-Man from different dimensions and timelines, each with their own abilities and styles. - Ultimate Spider-Man (2005): A comic book-inspired game that lets you play as both Spider-Man and Venom in a cel-shaded world that follows the Ultimate Marvel storyline. - Spider-Man 2 (2004): A movie-based game that lets you swing freely around a large and detailed New York City as Spider-Man and face various villains and challenges. <h4>Q: How can I learn more about Spider-Man and his universe?</h4>
|
94 |
-
A: If you are interested in learning more about Spider-Man and his universe, there are many sources that you can check out. You can read some of the comic books that feature Spider-Man and his allies and enemies from different eras and genres. You can watch some of the movies or shows that adapt or expand on Spider-Man's stories and characters from different perspectives and styles. You can also visit some of the websites or forums that discuss or analyze Spider-Man's lore and trivia from various angles and viewpoints.</p> 197e85843d<br />
|
95 |
-
<br />
|
96 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/FIFA Mobile 22 Hack How to Unlock All Players Kits and Stadiums for Free.md
DELETED
@@ -1,101 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download FIFA Mobile 22 Hack: How to Get Unlimited Coins and Gems</h1>
|
3 |
-
<p>FIFA Mobile 22 is one of the most popular and exciting soccer games on mobile devices. It allows you to build your ultimate team with over 15,000 authentic players from more than 600 clubs, compete in various modes such as World Cup, Champions League, Manager Mode, or Head-to-Head, and enjoy realistic graphics and gameplay. However, FIFA Mobile 22 also has some challenges and limitations that might frustrate some players. For example, you need coins and gems to buy players, upgrade your team, unlock features, or access premium content. Coins and gems are not easy to come by in the game, especially if you are a free-to-play user. You might have to spend a lot of time, effort, or even real money to get enough coins and gems for your needs.</p>
|
4 |
-
<h2>download fifa mobile 22 hack</h2><br /><p><b><b>Download Zip</b> ○○○ <a href="https://jinyurl.com/2uNLEr">https://jinyurl.com/2uNLEr</a></b></p><br /><br />
|
5 |
-
<p>That's why some players might want to hack FIFA Mobile 22 to get unlimited coins and gems. By hacking FIFA Mobile 22, you can bypass the restrictions and enjoy the game without any hassle. You can buy any player you want, upgrade your team to the max level, unlock all the features and events, or access any premium content you desire. Sounds tempting, right? But before you rush to download FIFA Mobile 22 hack, you should be aware of the risks and consequences of doing so. In this article, we will explain what are the dangers of hacking FIFA Mobile 22, how to download FIFA Mobile 22 hack safely and easily, and what are some alternative ways to get coins and gems without hacking.</p>
|
6 |
-
<h2>Disclaimer: The risks and consequences of hacking FIFA Mobile 22</h2>
|
7 |
-
<p>Before we proceed, we have to warn you about the potential dangers of hacking FIFA Mobile 22. Hacking is an illegal and unethical activity that violates the terms of service of EA Sports, the developer of FIFA Mobile 22. If you hack FIFA Mobile 22, you might face some serious consequences, such as:</p>
|
8 |
-
<ul>
|
9 |
-
<li>Getting banned from the game permanently or temporarily</li>
|
10 |
-
<li>Losing your progress, data, or account</li>
|
11 |
-
<li>Exposing your personal information or device to malware or hackers</li>
|
12 |
-
<li>Getting sued by EA Sports for damages or losses</li>
|
13 |
-
</ul>
|
14 |
-
<p <p>Therefore, we advise you to proceed at your own risk and responsibility if you decide to hack FIFA Mobile 22. We also suggest you to use a reliable and trusted source for downloading FIFA Mobile 22 hack, such as the one we will provide in the next section. Do not download FIFA Mobile 22 hack from unknown or suspicious websites, as they might contain viruses or malware that can harm your device or steal your information.</p>
|
15 |
-
<h2>How to download FIFA Mobile 22 hack: A step-by-step guide</h2>
|
16 |
-
<p>If you are still interested in hacking FIFA Mobile 22, here is a step-by-step guide on how to download FIFA Mobile 22 hack from a reputable website. Follow these instructions carefully and you will be able to get unlimited coins and gems in no time.</p>
|
17 |
-
<p>download fifa mobile 22 mod apk unlimited money<br />
|
18 |
-
download fifa mobile 22 cheat engine<br />
|
19 |
-
download fifa mobile 22 hack tool<br />
|
20 |
-
download fifa mobile 22 mod menu<br />
|
21 |
-
download fifa mobile 22 hack ios<br />
|
22 |
-
download fifa mobile 22 hack android<br />
|
23 |
-
download fifa mobile 22 hack no verification<br />
|
24 |
-
download fifa mobile 22 hack online<br />
|
25 |
-
download fifa mobile 22 hack generator<br />
|
26 |
-
download fifa mobile 22 hack apk obb<br />
|
27 |
-
download fifa mobile 22 mod apk latest version<br />
|
28 |
-
download fifa mobile 22 cheat codes<br />
|
29 |
-
download fifa mobile 22 hack for pc<br />
|
30 |
-
download fifa mobile 22 mod apk revdl<br />
|
31 |
-
download fifa mobile 22 hack without human verification<br />
|
32 |
-
download fifa mobile 22 hack coins and points<br />
|
33 |
-
download fifa mobile 22 mod apk data<br />
|
34 |
-
download fifa mobile 22 cheat sheet<br />
|
35 |
-
download fifa mobile 22 hack app<br />
|
36 |
-
download fifa mobile 22 mod apk offline<br />
|
37 |
-
download fifa mobile 22 hack no survey<br />
|
38 |
-
download fifa mobile 22 hack free<br />
|
39 |
-
download fifa mobile 22 hack unlimited everything<br />
|
40 |
-
download fifa mobile 22 mod apk rexdl<br />
|
41 |
-
download fifa mobile 22 cheat app<br />
|
42 |
-
download fifa mobile 22 hack for ios no jailbreak<br />
|
43 |
-
download fifa mobile 22 mod apk android 1<br />
|
44 |
-
download fifa mobile 22 cheat mod apk<br />
|
45 |
-
download fifa mobile 22 hack version<br />
|
46 |
-
download fifa mobile 22 mod apk happymod<br />
|
47 |
-
download fifa mobile 22 hack for android no root<br />
|
48 |
-
download fifa mobile 22 mod apk pure<br />
|
49 |
-
download fifa mobile 22 cheat engine apk<br />
|
50 |
-
download fifa mobile 22 hack file<br />
|
51 |
-
download fifa mobile 22 mod apk full unlocked<br />
|
52 |
-
download fifa mobile 22 hack with lucky patcher<br />
|
53 |
-
download fifa mobile 22 mod apk unlimited coins and points<br />
|
54 |
-
download fifa mobile 22 cheat tool apk<br />
|
55 |
-
download fifa mobile 22 hack by game guardian<br />
|
56 |
-
download fifa mobile 22 mod apk mega mod</p>
|
57 |
-
<ol>
|
58 |
-
<li>Go to the website [text], which is one of the best and safest sources for downloading FIFA Mobile 22 hack. You can access the website from any browser or device.</li>
|
59 |
-
<li>On the homepage, you will see a button that says "Download FIFA Mobile 22 Hack". Click on it and you will be redirected to a verification page.</li>
|
60 |
-
<li>On the verification page, you will have to complete a short and simple survey or offer to prove that you are a human and not a bot. This is a necessary step to prevent abuse and ensure the quality of the service. The survey or offer will only take a few minutes and will not cost you anything.</li>
|
61 |
-
<li>After completing the verification, you will be able to download FIFA Mobile 22 hack as an APK file. Save the file on your device and locate it using a file manager.</li>
|
62 |
-
<li>Before installing the APK file, make sure that you have enabled the "Unknown Sources" option in your device settings. This will allow you to install apps from sources other than the Google Play Store.</li>
|
63 |
-
<li>Tap on the APK file and follow the instructions on the screen to install FIFA Mobile 22 hack on your device. You might have to grant some permissions to the app for it to work properly.</li>
|
64 |
-
<li>Once the installation is done, you can launch FIFA Mobile 22 hack from your app drawer or home screen. You will see a user-friendly interface that will let you customize your preferences and settings.</li>
|
65 |
-
<li>Enter the amount of coins and gems that you want to generate and click on the "Start Hack" button. The hack will start working and inject the resources into your game account.</li>
|
66 |
-
<li>Wait for a few seconds or minutes until the hack is finished. You will see a confirmation message on the screen when it is done.</li>
|
67 |
-
<li>Open FIFA Mobile 22 and enjoy your unlimited coins and gems. You can use them to buy players, upgrade your team, unlock features, or access premium content as much as you want.</li>
|
68 |
-
</ol>
|
69 |
-
<p>Congratulations! You have successfully hacked FIFA Mobile 22 and got unlimited coins and gems. You can now enjoy the game without any limitations or restrictions. However, if you are not comfortable with hacking FIFA Mobile 22 or want to try some alternative ways to get coins and gems without hacking, keep reading.</p>
|
70 |
-
<h2>Alternative ways to get coins and gems without hacking</h2>
|
71 |
-
<p>Hacking FIFA Mobile 22 is not the only way to get coins and gems in the game. There are some legitimate and safe methods that you can use to earn coins and gems without breaking any rules or risking any consequences. Here are some of them:</p>
|
72 |
-
<table>
|
73 |
-
<tr><th>Method</th><th>Description</th><th>Benefits</th><th>Drawbacks</th></tr>
|
74 |
-
<tr><td>Completing tasks</td><td>FIFA Mobile 22 offers various tasks that you can complete to earn coins and gems. These tasks include daily, weekly, monthly, seasonal, or special tasks that require you to perform certain actions or achieve certain goals in the game.</td><td>- Easy and simple - Rewarding and satisfying - Diverse and varied</td><td>- Time-consuming - Repetitive - Limited</td></tr>
|
75 |
-
<tr><td>Participating in events</td><td>FIFA Mobile 22 also features various events that you can participate in to earn coins and gems. These events include World Cup, Champions League, Manager Mode, Head-to-Head, or other themed events that offer different challenges and rewards.</td><td>- Fun and exciting - Competitive and challenging - Generous and lucrative</td><td>- Difficult - Demanding - Seasonal</td></tr>
|
76 |
-
<tr><td>Achieving achievements</td><td>FIFA Mobile 22 has a list of achievements that you can achieve to earn coins and gems. These achievements include milestones, records, feats, or accomplishments that reflect your progress and performance in the game.</td><td>- Motivating and inspiring - Reflective and rewarding - Incremental and cumulative</td><td>- Hard and rare - Fixed and finite - Hidden and obscure</td></tr>
|
77 |
-
<tr><td>Watching ads</td><td>FIFA Mobile 22 allows you to watch ads to earn coins and gems. These ads are usually short and relevant to the game or your interests. You can watch ads from the store, the rewards center, or the events page.</td><td>- Quick and easy - Free and unlimited - Optional and voluntary</td><td>- Boring and annoying - Low and variable - Intrusive and distracting</td></tr>
|
78 |
-
</table>
|
79 |
-
<p>As you can see, there are some pros and cons of each method. You can choose the one that suits your preferences, goals, and playstyle. You can also combine different methods to maximize your coin and gem income. Here are some tips and tricks on how to optimize your coin and gem income in FIFA Mobile 22:</p>
|
80 |
-
<ul>
|
81 |
-
<li>Play the game regularly and complete the daily tasks every day. They are the easiest and most consistent way to earn coins and gems.</li>
|
82 |
-
<li>Participate in the events that match your skill level and team strength. They are the most fun and rewarding way to earn coins and gems.</li>
|
83 |
-
<li>Achieve the achievements that are within your reach and match your playstyle. They are the most motivating and reflective way to earn coins and gems.</li>
|
84 |
-
<li>Watch ads when you have some spare time or need some extra coins or gems. They are the quickest and easiest way to earn coins and gems.</li>
|
85 |
-
<li>Save your coins and gems for the players, features, or content that you really want or need. Do not waste them on unnecessary or impulsive purchases.</li>
|
86 |
-
</ul>
|
87 |
-
<h2>Conclusion: Summarize the main points and give a final verdict</h2>
|
88 |
-
<p>In conclusion, FIFA Mobile 22 is a great soccer game that offers a lot of fun and excitement for mobile gamers. However, it also has some challenges and limitations that might make some players want to hack it to get unlimited coins and gems. In this article, we have explained what are the risks and consequences of hacking FIFA Mobile 22, how to download FIFA Mobile 22 hack safely and easily, and what are some alternative ways to get coins and gems without hacking.</p>
|
89 |
-
<p>Our final verdict is that hacking FIFA Mobile 22 is not worth it. It is illegal, unethical, risky, and unnecessary. You might end up losing more than you gain by hacking FIFA Mobile 22. You might lose your account, your progress, your data, or even your device. You might also lose the fun, challenge, satisfaction, and integrity of playing FIFA Mobile 22.</p>
|
90 |
-
<p>We recommend you to play FIFA Mobile 22 without hacking it. You can still enjoy the game without unlimited coins and gems. You can still earn enough coins and gems by playing the game legitimately and safely. You can still build your ultimate team, compete in various modes, and enjoy realistic graphics and gameplay.</p>
|
91 |
-
<p>We hope you found this article helpful and informative. If you have any questions, comments, or feedback, please feel free to share them in the comments section below. We would love to hear from you. Thank you for reading!</p>
|
92 |
-
<h3>FAQs</h3>
|
93 |
-
<ol>
|
94 |
-
<li>Q: Is FIFA Mobile 22 hack safe to use?<br>A: No, FIFA Mobile 22 hack is not safe to use. It might contain viruses or malware that can harm your device or steal your information. It might also get you banned from the game or sued by EA Sports.</li>
|
95 |
-
<li>Q: Is FIFA Mobile 22 hack free to download?<br>A: Yes, FIFA Mobile 22 hack is free to download from some websites. However, you might have to complete a verification process before downloading it. You might also have to pay for some features or updates of the hack.</li>
|
96 |
-
<li>Q: Is FIFA Mobile 22 hack compatible with all devices?<br>A: No, FIFA Mobile 22 hack is not compatible with all devices. It might only work on certain devices or operating systems. It might also require root or jailbreak access for some devices.</li>
|
97 |
-
<li>Q: Is FIFA Mobile 22 hack legal to use?<br>A: No, FIFA Mobile 22 hack is not legal to use. It violates the terms of service of EA Sports, the developer of FIFA Mobile 22. It also infringes on their intellectual property rights.</li>
|
98 |
-
<li>Q: Is FIFA Mobile 22 hack ethical to use?<br>A: No, FIFA Mobile 22 hack is not ethical to use. It gives you an unfair advantage over other players who play the game without hacking it. It also ruins the fun, challenge, satisfaction, and integrity of playing FIFA Mobile 22.</li>
|
99 |
-
</ol></p> 197e85843d<br />
|
100 |
-
<br />
|
101 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2ndelement/voicevox/voicevox_engine/utility/connect_base64_waves.py
DELETED
@@ -1,60 +0,0 @@
|
|
1 |
-
import base64
|
2 |
-
import io
|
3 |
-
from typing import List, Tuple
|
4 |
-
|
5 |
-
import numpy as np
|
6 |
-
import soundfile
|
7 |
-
from scipy.signal import resample
|
8 |
-
|
9 |
-
|
10 |
-
class ConnectBase64WavesException(Exception):
|
11 |
-
def __init__(self, message: str):
|
12 |
-
self.message = message
|
13 |
-
|
14 |
-
|
15 |
-
def decode_base64_waves(waves: List[str]) -> List[Tuple[np.ndarray, int]]:
|
16 |
-
"""
|
17 |
-
base64エンコードされた複数のwavデータをデコードする
|
18 |
-
Parameters
|
19 |
-
----------
|
20 |
-
waves: list[str]
|
21 |
-
base64エンコードされたwavデータのリスト
|
22 |
-
Returns
|
23 |
-
-------
|
24 |
-
waves_nparray_sr: List[Tuple[np.ndarray, int]]
|
25 |
-
(NumPy配列の音声波形データ, サンプリングレート) 形式のタプルのリスト
|
26 |
-
"""
|
27 |
-
if len(waves) == 0:
|
28 |
-
raise ConnectBase64WavesException("wavファイルが含まれていません")
|
29 |
-
|
30 |
-
waves_nparray_sr = []
|
31 |
-
for wave in waves:
|
32 |
-
try:
|
33 |
-
wav_bin = base64.standard_b64decode(wave)
|
34 |
-
except ValueError:
|
35 |
-
raise ConnectBase64WavesException("base64デコードに失敗しました")
|
36 |
-
try:
|
37 |
-
_data = soundfile.read(io.BytesIO(wav_bin))
|
38 |
-
except Exception:
|
39 |
-
raise ConnectBase64WavesException("wavファイルを読み込めませんでした")
|
40 |
-
waves_nparray_sr.append(_data)
|
41 |
-
|
42 |
-
return waves_nparray_sr
|
43 |
-
|
44 |
-
|
45 |
-
def connect_base64_waves(waves: List[str]) -> Tuple[np.ndarray, int]:
|
46 |
-
waves_nparray_sr = decode_base64_waves(waves)
|
47 |
-
|
48 |
-
max_sampling_rate = max([sr for _, sr in waves_nparray_sr])
|
49 |
-
max_channels = max([x.ndim for x, _ in waves_nparray_sr])
|
50 |
-
assert 0 < max_channels <= 2
|
51 |
-
|
52 |
-
waves_nparray_list = []
|
53 |
-
for nparray, sr in waves_nparray_sr:
|
54 |
-
if sr != max_sampling_rate:
|
55 |
-
nparray = resample(nparray, max_sampling_rate * len(nparray) // sr)
|
56 |
-
if nparray.ndim < max_channels:
|
57 |
-
nparray = np.array([nparray, nparray]).T
|
58 |
-
waves_nparray_list.append(nparray)
|
59 |
-
|
60 |
-
return np.concatenate(waves_nparray_list), max_sampling_rate
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/StyleGANEX/models/mtcnn/__init__.py
DELETED
File without changes
|
spaces/AIFILMS/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/box_utils.py
DELETED
@@ -1,238 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
from PIL import Image
|
3 |
-
|
4 |
-
|
5 |
-
def nms(boxes, overlap_threshold=0.5, mode='union'):
|
6 |
-
"""Non-maximum suppression.
|
7 |
-
|
8 |
-
Arguments:
|
9 |
-
boxes: a float numpy array of shape [n, 5],
|
10 |
-
where each row is (xmin, ymin, xmax, ymax, score).
|
11 |
-
overlap_threshold: a float number.
|
12 |
-
mode: 'union' or 'min'.
|
13 |
-
|
14 |
-
Returns:
|
15 |
-
list with indices of the selected boxes
|
16 |
-
"""
|
17 |
-
|
18 |
-
# if there are no boxes, return the empty list
|
19 |
-
if len(boxes) == 0:
|
20 |
-
return []
|
21 |
-
|
22 |
-
# list of picked indices
|
23 |
-
pick = []
|
24 |
-
|
25 |
-
# grab the coordinates of the bounding boxes
|
26 |
-
x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
|
27 |
-
|
28 |
-
area = (x2 - x1 + 1.0) * (y2 - y1 + 1.0)
|
29 |
-
ids = np.argsort(score) # in increasing order
|
30 |
-
|
31 |
-
while len(ids) > 0:
|
32 |
-
|
33 |
-
# grab index of the largest value
|
34 |
-
last = len(ids) - 1
|
35 |
-
i = ids[last]
|
36 |
-
pick.append(i)
|
37 |
-
|
38 |
-
# compute intersections
|
39 |
-
# of the box with the largest score
|
40 |
-
# with the rest of boxes
|
41 |
-
|
42 |
-
# left top corner of intersection boxes
|
43 |
-
ix1 = np.maximum(x1[i], x1[ids[:last]])
|
44 |
-
iy1 = np.maximum(y1[i], y1[ids[:last]])
|
45 |
-
|
46 |
-
# right bottom corner of intersection boxes
|
47 |
-
ix2 = np.minimum(x2[i], x2[ids[:last]])
|
48 |
-
iy2 = np.minimum(y2[i], y2[ids[:last]])
|
49 |
-
|
50 |
-
# width and height of intersection boxes
|
51 |
-
w = np.maximum(0.0, ix2 - ix1 + 1.0)
|
52 |
-
h = np.maximum(0.0, iy2 - iy1 + 1.0)
|
53 |
-
|
54 |
-
# intersections' areas
|
55 |
-
inter = w * h
|
56 |
-
if mode == 'min':
|
57 |
-
overlap = inter / np.minimum(area[i], area[ids[:last]])
|
58 |
-
elif mode == 'union':
|
59 |
-
# intersection over union (IoU)
|
60 |
-
overlap = inter / (area[i] + area[ids[:last]] - inter)
|
61 |
-
|
62 |
-
# delete all boxes where overlap is too big
|
63 |
-
ids = np.delete(
|
64 |
-
ids,
|
65 |
-
np.concatenate([[last], np.where(overlap > overlap_threshold)[0]])
|
66 |
-
)
|
67 |
-
|
68 |
-
return pick
|
69 |
-
|
70 |
-
|
71 |
-
def convert_to_square(bboxes):
|
72 |
-
"""Convert bounding boxes to a square form.
|
73 |
-
|
74 |
-
Arguments:
|
75 |
-
bboxes: a float numpy array of shape [n, 5].
|
76 |
-
|
77 |
-
Returns:
|
78 |
-
a float numpy array of shape [n, 5],
|
79 |
-
squared bounding boxes.
|
80 |
-
"""
|
81 |
-
|
82 |
-
square_bboxes = np.zeros_like(bboxes)
|
83 |
-
x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
|
84 |
-
h = y2 - y1 + 1.0
|
85 |
-
w = x2 - x1 + 1.0
|
86 |
-
max_side = np.maximum(h, w)
|
87 |
-
square_bboxes[:, 0] = x1 + w * 0.5 - max_side * 0.5
|
88 |
-
square_bboxes[:, 1] = y1 + h * 0.5 - max_side * 0.5
|
89 |
-
square_bboxes[:, 2] = square_bboxes[:, 0] + max_side - 1.0
|
90 |
-
square_bboxes[:, 3] = square_bboxes[:, 1] + max_side - 1.0
|
91 |
-
return square_bboxes
|
92 |
-
|
93 |
-
|
94 |
-
def calibrate_box(bboxes, offsets):
|
95 |
-
"""Transform bounding boxes to be more like true bounding boxes.
|
96 |
-
'offsets' is one of the outputs of the nets.
|
97 |
-
|
98 |
-
Arguments:
|
99 |
-
bboxes: a float numpy array of shape [n, 5].
|
100 |
-
offsets: a float numpy array of shape [n, 4].
|
101 |
-
|
102 |
-
Returns:
|
103 |
-
a float numpy array of shape [n, 5].
|
104 |
-
"""
|
105 |
-
x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
|
106 |
-
w = x2 - x1 + 1.0
|
107 |
-
h = y2 - y1 + 1.0
|
108 |
-
w = np.expand_dims(w, 1)
|
109 |
-
h = np.expand_dims(h, 1)
|
110 |
-
|
111 |
-
# this is what happening here:
|
112 |
-
# tx1, ty1, tx2, ty2 = [offsets[:, i] for i in range(4)]
|
113 |
-
# x1_true = x1 + tx1*w
|
114 |
-
# y1_true = y1 + ty1*h
|
115 |
-
# x2_true = x2 + tx2*w
|
116 |
-
# y2_true = y2 + ty2*h
|
117 |
-
# below is just more compact form of this
|
118 |
-
|
119 |
-
# are offsets always such that
|
120 |
-
# x1 < x2 and y1 < y2 ?
|
121 |
-
|
122 |
-
translation = np.hstack([w, h, w, h]) * offsets
|
123 |
-
bboxes[:, 0:4] = bboxes[:, 0:4] + translation
|
124 |
-
return bboxes
|
125 |
-
|
126 |
-
|
127 |
-
def get_image_boxes(bounding_boxes, img, size=24):
|
128 |
-
"""Cut out boxes from the image.
|
129 |
-
|
130 |
-
Arguments:
|
131 |
-
bounding_boxes: a float numpy array of shape [n, 5].
|
132 |
-
img: an instance of PIL.Image.
|
133 |
-
size: an integer, size of cutouts.
|
134 |
-
|
135 |
-
Returns:
|
136 |
-
a float numpy array of shape [n, 3, size, size].
|
137 |
-
"""
|
138 |
-
|
139 |
-
num_boxes = len(bounding_boxes)
|
140 |
-
width, height = img.size
|
141 |
-
|
142 |
-
[dy, edy, dx, edx, y, ey, x, ex, w, h] = correct_bboxes(bounding_boxes, width, height)
|
143 |
-
img_boxes = np.zeros((num_boxes, 3, size, size), 'float32')
|
144 |
-
|
145 |
-
for i in range(num_boxes):
|
146 |
-
img_box = np.zeros((h[i], w[i], 3), 'uint8')
|
147 |
-
|
148 |
-
img_array = np.asarray(img, 'uint8')
|
149 |
-
img_box[dy[i]:(edy[i] + 1), dx[i]:(edx[i] + 1), :] = \
|
150 |
-
img_array[y[i]:(ey[i] + 1), x[i]:(ex[i] + 1), :]
|
151 |
-
|
152 |
-
# resize
|
153 |
-
img_box = Image.fromarray(img_box)
|
154 |
-
img_box = img_box.resize((size, size), Image.BILINEAR)
|
155 |
-
img_box = np.asarray(img_box, 'float32')
|
156 |
-
|
157 |
-
img_boxes[i, :, :, :] = _preprocess(img_box)
|
158 |
-
|
159 |
-
return img_boxes
|
160 |
-
|
161 |
-
|
162 |
-
def correct_bboxes(bboxes, width, height):
|
163 |
-
"""Crop boxes that are too big and get coordinates
|
164 |
-
with respect to cutouts.
|
165 |
-
|
166 |
-
Arguments:
|
167 |
-
bboxes: a float numpy array of shape [n, 5],
|
168 |
-
where each row is (xmin, ymin, xmax, ymax, score).
|
169 |
-
width: a float number.
|
170 |
-
height: a float number.
|
171 |
-
|
172 |
-
Returns:
|
173 |
-
dy, dx, edy, edx: a int numpy arrays of shape [n],
|
174 |
-
coordinates of the boxes with respect to the cutouts.
|
175 |
-
y, x, ey, ex: a int numpy arrays of shape [n],
|
176 |
-
corrected ymin, xmin, ymax, xmax.
|
177 |
-
h, w: a int numpy arrays of shape [n],
|
178 |
-
just heights and widths of boxes.
|
179 |
-
|
180 |
-
in the following order:
|
181 |
-
[dy, edy, dx, edx, y, ey, x, ex, w, h].
|
182 |
-
"""
|
183 |
-
|
184 |
-
x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
|
185 |
-
w, h = x2 - x1 + 1.0, y2 - y1 + 1.0
|
186 |
-
num_boxes = bboxes.shape[0]
|
187 |
-
|
188 |
-
# 'e' stands for end
|
189 |
-
# (x, y) -> (ex, ey)
|
190 |
-
x, y, ex, ey = x1, y1, x2, y2
|
191 |
-
|
192 |
-
# we need to cut out a box from the image.
|
193 |
-
# (x, y, ex, ey) are corrected coordinates of the box
|
194 |
-
# in the image.
|
195 |
-
# (dx, dy, edx, edy) are coordinates of the box in the cutout
|
196 |
-
# from the image.
|
197 |
-
dx, dy = np.zeros((num_boxes,)), np.zeros((num_boxes,))
|
198 |
-
edx, edy = w.copy() - 1.0, h.copy() - 1.0
|
199 |
-
|
200 |
-
# if box's bottom right corner is too far right
|
201 |
-
ind = np.where(ex > width - 1.0)[0]
|
202 |
-
edx[ind] = w[ind] + width - 2.0 - ex[ind]
|
203 |
-
ex[ind] = width - 1.0
|
204 |
-
|
205 |
-
# if box's bottom right corner is too low
|
206 |
-
ind = np.where(ey > height - 1.0)[0]
|
207 |
-
edy[ind] = h[ind] + height - 2.0 - ey[ind]
|
208 |
-
ey[ind] = height - 1.0
|
209 |
-
|
210 |
-
# if box's top left corner is too far left
|
211 |
-
ind = np.where(x < 0.0)[0]
|
212 |
-
dx[ind] = 0.0 - x[ind]
|
213 |
-
x[ind] = 0.0
|
214 |
-
|
215 |
-
# if box's top left corner is too high
|
216 |
-
ind = np.where(y < 0.0)[0]
|
217 |
-
dy[ind] = 0.0 - y[ind]
|
218 |
-
y[ind] = 0.0
|
219 |
-
|
220 |
-
return_list = [dy, edy, dx, edx, y, ey, x, ex, w, h]
|
221 |
-
return_list = [i.astype('int32') for i in return_list]
|
222 |
-
|
223 |
-
return return_list
|
224 |
-
|
225 |
-
|
226 |
-
def _preprocess(img):
|
227 |
-
"""Preprocessing step before feeding the network.
|
228 |
-
|
229 |
-
Arguments:
|
230 |
-
img: a float numpy array of shape [h, w, c].
|
231 |
-
|
232 |
-
Returns:
|
233 |
-
a float numpy array of shape [1, c, h, w].
|
234 |
-
"""
|
235 |
-
img = img.transpose((2, 0, 1))
|
236 |
-
img = np.expand_dims(img, 0)
|
237 |
-
img = (img - 127.5) * 0.0078125
|
238 |
-
return img
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/variational_autoencoder/__init__.py
DELETED
File without changes
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/portaspeech/portaspeech.py
DELETED
@@ -1,230 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import torch
|
3 |
-
from torch import nn
|
4 |
-
from torch.nn import Linear
|
5 |
-
|
6 |
-
from modules.commons.conv import ConvBlocks, ConditionalConvBlocks
|
7 |
-
from modules.commons.common_layers import Embedding
|
8 |
-
from modules.commons.rel_transformer import RelTransformerEncoder
|
9 |
-
from modules.commons.transformer import MultiheadAttention, FFTBlocks
|
10 |
-
from modules.commons.align_ops import clip_mel2token_to_multiple, build_word_mask, expand_states, mel2ph_to_mel2word
|
11 |
-
from modules.portaspeech.fs import FS_DECODERS, FastSpeech
|
12 |
-
from modules.portaspeech.fvae import FVAE
|
13 |
-
from utils.tts_utils import group_hidden_by_segs
|
14 |
-
from utils.hparams import hparams
|
15 |
-
|
16 |
-
class SinusoidalPosEmb(nn.Module):
|
17 |
-
def __init__(self, dim):
|
18 |
-
super().__init__()
|
19 |
-
self.dim = dim
|
20 |
-
|
21 |
-
def forward(self, x):
|
22 |
-
"""
|
23 |
-
|
24 |
-
:param x: [B, T]
|
25 |
-
:return: [B, T, H]
|
26 |
-
"""
|
27 |
-
device = x.device
|
28 |
-
half_dim = self.dim // 2
|
29 |
-
emb = math.log(10000) / (half_dim - 1)
|
30 |
-
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
|
31 |
-
emb = x[:, :, None] * emb[None, :]
|
32 |
-
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
|
33 |
-
return emb
|
34 |
-
|
35 |
-
|
36 |
-
class PortaSpeech(FastSpeech):
|
37 |
-
def __init__(self, ph_dictionary, word_dictionary, out_dims=None):
|
38 |
-
super().__init__(ph_dictionary, out_dims)
|
39 |
-
# build linguistic encoder
|
40 |
-
if hparams['use_word_encoder']:
|
41 |
-
# default False, use independent word embedding instead of phoneme encoding to represent word
|
42 |
-
self.word_encoder = RelTransformerEncoder(
|
43 |
-
len(word_dictionary), self.hidden_size, self.hidden_size, self.hidden_size, 2,
|
44 |
-
hparams['word_enc_layers'], hparams['enc_ffn_kernel_size'])
|
45 |
-
if hparams['dur_level'] == 'word':
|
46 |
-
if hparams['word_encoder_type'] == 'rel_fft':
|
47 |
-
self.ph2word_encoder = RelTransformerEncoder(
|
48 |
-
0, self.hidden_size, self.hidden_size, self.hidden_size, 2,
|
49 |
-
hparams['word_enc_layers'], hparams['enc_ffn_kernel_size'])
|
50 |
-
if hparams['word_encoder_type'] == 'fft':
|
51 |
-
self.ph2word_encoder = FFTBlocks(
|
52 |
-
self.hidden_size, hparams['word_enc_layers'], 1, num_heads=hparams['num_heads'])
|
53 |
-
self.sin_pos = SinusoidalPosEmb(self.hidden_size)
|
54 |
-
self.enc_pos_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
|
55 |
-
self.dec_query_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
|
56 |
-
self.dec_res_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
|
57 |
-
self.attn = MultiheadAttention(self.hidden_size, 1, encoder_decoder_attention=True, bias=False)
|
58 |
-
self.attn.enable_torch_version = False
|
59 |
-
if hparams['text_encoder_postnet']:
|
60 |
-
self.text_encoder_postnet = ConvBlocks(
|
61 |
-
self.hidden_size, self.hidden_size, [1] * 3, 5, layers_in_block=2)
|
62 |
-
else:
|
63 |
-
self.sin_pos = SinusoidalPosEmb(self.hidden_size)
|
64 |
-
# build VAE decoder
|
65 |
-
if hparams['use_fvae']:
|
66 |
-
del self.decoder
|
67 |
-
del self.mel_out
|
68 |
-
self.fvae = FVAE(
|
69 |
-
c_in_out=self.out_dims,
|
70 |
-
hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'],
|
71 |
-
kernel_size=hparams['fvae_kernel_size'],
|
72 |
-
enc_n_layers=hparams['fvae_enc_n_layers'],
|
73 |
-
dec_n_layers=hparams['fvae_dec_n_layers'],
|
74 |
-
c_cond=self.hidden_size,
|
75 |
-
use_prior_flow=hparams['use_prior_flow'],
|
76 |
-
flow_hidden=hparams['prior_flow_hidden'],
|
77 |
-
flow_kernel_size=hparams['prior_flow_kernel_size'],
|
78 |
-
flow_n_steps=hparams['prior_flow_n_blocks'],
|
79 |
-
strides=[hparams['fvae_strides']],
|
80 |
-
encoder_type=hparams['fvae_encoder_type'],
|
81 |
-
decoder_type=hparams['fvae_decoder_type'],
|
82 |
-
)
|
83 |
-
else:
|
84 |
-
self.decoder = FS_DECODERS[hparams['decoder_type']](hparams)
|
85 |
-
self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True)
|
86 |
-
if hparams['use_pitch_embed']:
|
87 |
-
self.pitch_embed = Embedding(300, self.hidden_size, 0)
|
88 |
-
if hparams['add_word_pos']:
|
89 |
-
self.word_pos_proj = Linear(self.hidden_size, self.hidden_size)
|
90 |
-
|
91 |
-
def build_embedding(self, dictionary, embed_dim):
|
92 |
-
num_embeddings = len(dictionary)
|
93 |
-
emb = Embedding(num_embeddings, embed_dim, self.padding_idx)
|
94 |
-
return emb
|
95 |
-
|
96 |
-
def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None,
|
97 |
-
spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None,
|
98 |
-
global_step=None, *args, **kwargs):
|
99 |
-
ret = {}
|
100 |
-
style_embed = self.forward_style_embed(spk_embed, spk_id)
|
101 |
-
x, tgt_nonpadding = self.run_text_encoder(
|
102 |
-
txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, **kwargs)
|
103 |
-
x = x * tgt_nonpadding
|
104 |
-
ret['nonpadding'] = tgt_nonpadding
|
105 |
-
if hparams['use_pitch_embed']:
|
106 |
-
x = x + self.pitch_embed(pitch)
|
107 |
-
ret['decoder_inp'] = x
|
108 |
-
ret['mel_out_fvae'] = ret['mel_out'] = self.run_decoder(x, tgt_nonpadding, ret, infer, tgt_mels, global_step)
|
109 |
-
return ret
|
110 |
-
|
111 |
-
def run_text_encoder(self, txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, **kwargs):
|
112 |
-
word2word = torch.arange(word_len)[None, :].to(ph2word.device) + 1 # [B, T_mel, T_word]
|
113 |
-
src_nonpadding = (txt_tokens > 0).float()[:, :, None]
|
114 |
-
use_bert = hparams.get("use_bert") is True
|
115 |
-
if use_bert:
|
116 |
-
ph_encoder_out = self.ph_encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=ph2word,
|
117 |
-
graph_lst=kwargs['graph_lst'], etypes_lst=kwargs['etypes_lst'],
|
118 |
-
cl_feats=kwargs['cl_feats'], ret=ret) * src_nonpadding + style_embed
|
119 |
-
else:
|
120 |
-
ph_encoder_out = self.ph_encoder(txt_tokens) * src_nonpadding + style_embed
|
121 |
-
if hparams['use_word_encoder']:
|
122 |
-
word_encoder_out = self.word_encoder(word_tokens) + style_embed
|
123 |
-
ph_encoder_out = ph_encoder_out + expand_states(word_encoder_out, ph2word)
|
124 |
-
if hparams['dur_level'] == 'word':
|
125 |
-
word_encoder_out = 0
|
126 |
-
h_ph_gb_word = group_hidden_by_segs(ph_encoder_out, ph2word, word_len)[0]
|
127 |
-
word_encoder_out = word_encoder_out + self.ph2word_encoder(h_ph_gb_word)
|
128 |
-
if hparams['use_word_encoder']:
|
129 |
-
word_encoder_out = word_encoder_out + self.word_encoder(word_tokens)
|
130 |
-
mel2word = self.forward_dur(ph_encoder_out, mel2word, ret, ph2word=ph2word, word_len=word_len)
|
131 |
-
mel2word = clip_mel2token_to_multiple(mel2word, hparams['frames_multiple'])
|
132 |
-
tgt_nonpadding = (mel2word > 0).float()[:, :, None]
|
133 |
-
enc_pos = self.get_pos_embed(word2word, ph2word) # [B, T_ph, H]
|
134 |
-
dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H]
|
135 |
-
dec_word_mask = build_word_mask(mel2word, ph2word) # [B, T_mel, T_ph]
|
136 |
-
x, weight = self.attention(ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask)
|
137 |
-
if hparams['add_word_pos']:
|
138 |
-
x = x + self.word_pos_proj(dec_pos)
|
139 |
-
ret['attn'] = weight
|
140 |
-
else:
|
141 |
-
mel2ph = self.forward_dur(ph_encoder_out, mel2ph, ret)
|
142 |
-
mel2ph = clip_mel2token_to_multiple(mel2ph, hparams['frames_multiple'])
|
143 |
-
mel2word = mel2ph_to_mel2word(mel2ph, ph2word)
|
144 |
-
x = expand_states(ph_encoder_out, mel2ph)
|
145 |
-
if hparams['add_word_pos']:
|
146 |
-
dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H]
|
147 |
-
x = x + self.word_pos_proj(dec_pos)
|
148 |
-
tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
|
149 |
-
if hparams['use_word_encoder']:
|
150 |
-
x = x + expand_states(word_encoder_out, mel2word)
|
151 |
-
return x, tgt_nonpadding
|
152 |
-
|
153 |
-
def attention(self, ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask):
|
154 |
-
ph_kv = self.enc_pos_proj(torch.cat([ph_encoder_out, enc_pos], -1))
|
155 |
-
word_enc_out_expend = expand_states(word_encoder_out, mel2word)
|
156 |
-
word_enc_out_expend = torch.cat([word_enc_out_expend, dec_pos], -1)
|
157 |
-
if hparams['text_encoder_postnet']:
|
158 |
-
word_enc_out_expend = self.dec_res_proj(word_enc_out_expend)
|
159 |
-
word_enc_out_expend = self.text_encoder_postnet(word_enc_out_expend)
|
160 |
-
dec_q = x_res = word_enc_out_expend
|
161 |
-
else:
|
162 |
-
dec_q = self.dec_query_proj(word_enc_out_expend)
|
163 |
-
x_res = self.dec_res_proj(word_enc_out_expend)
|
164 |
-
ph_kv, dec_q = ph_kv.transpose(0, 1), dec_q.transpose(0, 1)
|
165 |
-
x, (weight, _) = self.attn(dec_q, ph_kv, ph_kv, attn_mask=(1 - dec_word_mask) * -1e9)
|
166 |
-
x = x.transpose(0, 1)
|
167 |
-
x = x + x_res
|
168 |
-
return x, weight
|
169 |
-
|
170 |
-
def run_decoder(self, x, tgt_nonpadding, ret, infer, tgt_mels=None, global_step=0):
|
171 |
-
if not hparams['use_fvae']:
|
172 |
-
x = self.decoder(x)
|
173 |
-
x = self.mel_out(x)
|
174 |
-
ret['kl'] = 0
|
175 |
-
return x * tgt_nonpadding
|
176 |
-
else:
|
177 |
-
decoder_inp = x
|
178 |
-
x = x.transpose(1, 2) # [B, H, T]
|
179 |
-
tgt_nonpadding_BHT = tgt_nonpadding.transpose(1, 2) # [B, H, T]
|
180 |
-
if infer:
|
181 |
-
z = self.fvae(cond=x, infer=True)
|
182 |
-
else:
|
183 |
-
tgt_mels = tgt_mels.transpose(1, 2) # [B, 80, T]
|
184 |
-
z, ret['kl'], ret['z_p'], ret['m_q'], ret['logs_q'] = self.fvae(
|
185 |
-
tgt_mels, tgt_nonpadding_BHT, cond=x)
|
186 |
-
if global_step < hparams['posterior_start_steps']:
|
187 |
-
z = torch.randn_like(z)
|
188 |
-
x_recon = self.fvae.decoder(z, nonpadding=tgt_nonpadding_BHT, cond=x).transpose(1, 2)
|
189 |
-
ret['pre_mel_out'] = x_recon
|
190 |
-
return x_recon
|
191 |
-
|
192 |
-
def forward_dur(self, dur_input, mel2word, ret, **kwargs):
|
193 |
-
"""
|
194 |
-
|
195 |
-
:param dur_input: [B, T_txt, H]
|
196 |
-
:param mel2ph: [B, T_mel]
|
197 |
-
:param txt_tokens: [B, T_txt]
|
198 |
-
:param ret:
|
199 |
-
:return:
|
200 |
-
"""
|
201 |
-
src_padding = dur_input.data.abs().sum(-1) == 0
|
202 |
-
dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach())
|
203 |
-
dur = self.dur_predictor(dur_input, src_padding)
|
204 |
-
if hparams['dur_level'] == 'word':
|
205 |
-
word_len = kwargs['word_len']
|
206 |
-
ph2word = kwargs['ph2word']
|
207 |
-
B, T_ph = ph2word.shape
|
208 |
-
dur = torch.zeros([B, word_len.max() + 1]).to(ph2word.device).scatter_add(1, ph2word, dur)
|
209 |
-
dur = dur[:, 1:]
|
210 |
-
ret['dur'] = dur
|
211 |
-
if mel2word is None:
|
212 |
-
mel2word = self.length_regulator(dur).detach()
|
213 |
-
return mel2word
|
214 |
-
|
215 |
-
def get_pos_embed(self, word2word, x2word):
|
216 |
-
x_pos = build_word_mask(word2word, x2word).float() # [B, T_word, T_ph]
|
217 |
-
x_pos = (x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos).sum(1)
|
218 |
-
x_pos = self.sin_pos(x_pos.float()) # [B, T_ph, H]
|
219 |
-
return x_pos
|
220 |
-
|
221 |
-
def store_inverse_all(self):
|
222 |
-
def remove_weight_norm(m):
|
223 |
-
try:
|
224 |
-
if hasattr(m, 'store_inverse'):
|
225 |
-
m.store_inverse()
|
226 |
-
nn.utils.remove_weight_norm(m)
|
227 |
-
except ValueError: # this module didn't have weight norm
|
228 |
-
return
|
229 |
-
|
230 |
-
self.apply(remove_weight_norm)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/version.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
__version__ = '0.2.1'
|
|
|
|
spaces/AIZero2HeroBootcamp/ChatGPTandLangchain/app.py
DELETED
@@ -1,442 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import openai
|
3 |
-
import os
|
4 |
-
import base64
|
5 |
-
import glob
|
6 |
-
import json
|
7 |
-
import mistune
|
8 |
-
import pytz
|
9 |
-
import math
|
10 |
-
import requests
|
11 |
-
import time
|
12 |
-
import re
|
13 |
-
import textract
|
14 |
-
|
15 |
-
from datetime import datetime
|
16 |
-
from openai import ChatCompletion
|
17 |
-
from xml.etree import ElementTree as ET
|
18 |
-
from bs4 import BeautifulSoup
|
19 |
-
from collections import deque
|
20 |
-
from audio_recorder_streamlit import audio_recorder
|
21 |
-
|
22 |
-
from dotenv import load_dotenv
|
23 |
-
from PyPDF2 import PdfReader
|
24 |
-
from langchain.text_splitter import CharacterTextSplitter
|
25 |
-
from langchain.embeddings import OpenAIEmbeddings
|
26 |
-
from langchain.vectorstores import FAISS
|
27 |
-
from langchain.chat_models import ChatOpenAI
|
28 |
-
from langchain.memory import ConversationBufferMemory
|
29 |
-
from langchain.chains import ConversationalRetrievalChain
|
30 |
-
from templates import css, bot_template, user_template
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
def generate_filename(prompt, file_type):
|
35 |
-
central = pytz.timezone('US/Central')
|
36 |
-
safe_date_time = datetime.now(central).strftime("%m%d_%H%M") # Date and time DD-HHMM
|
37 |
-
safe_prompt = "".join(x for x in prompt if x.isalnum())[:90] # Limit file name size and trim whitespace
|
38 |
-
return f"{safe_date_time}_{safe_prompt}.{file_type}" # Return a safe file name
|
39 |
-
|
40 |
-
|
41 |
-
def transcribe_audio(openai_key, file_path, model):
|
42 |
-
OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
|
43 |
-
headers = {
|
44 |
-
"Authorization": f"Bearer {openai_key}",
|
45 |
-
}
|
46 |
-
with open(file_path, 'rb') as f:
|
47 |
-
data = {'file': f}
|
48 |
-
response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
|
49 |
-
if response.status_code == 200:
|
50 |
-
st.write(response.json())
|
51 |
-
chatResponse = chat_with_model(response.json().get('text'), '') # *************************************
|
52 |
-
transcript = response.json().get('text')
|
53 |
-
#st.write('Responses:')
|
54 |
-
#st.write(chatResponse)
|
55 |
-
filename = generate_filename(transcript, 'txt')
|
56 |
-
create_file(filename, transcript, chatResponse)
|
57 |
-
return transcript
|
58 |
-
else:
|
59 |
-
st.write(response.json())
|
60 |
-
st.error("Error in API call.")
|
61 |
-
return None
|
62 |
-
|
63 |
-
def save_and_play_audio(audio_recorder):
|
64 |
-
audio_bytes = audio_recorder()
|
65 |
-
if audio_bytes:
|
66 |
-
filename = generate_filename("Recording", "wav")
|
67 |
-
with open(filename, 'wb') as f:
|
68 |
-
f.write(audio_bytes)
|
69 |
-
st.audio(audio_bytes, format="audio/wav")
|
70 |
-
return filename
|
71 |
-
return None
|
72 |
-
|
73 |
-
def create_file(filename, prompt, response):
|
74 |
-
if filename.endswith(".txt"):
|
75 |
-
with open(filename, 'w') as file:
|
76 |
-
file.write(f"{prompt}\n{response}")
|
77 |
-
elif filename.endswith(".htm"):
|
78 |
-
with open(filename, 'w') as file:
|
79 |
-
file.write(f"{prompt} {response}")
|
80 |
-
elif filename.endswith(".md"):
|
81 |
-
with open(filename, 'w') as file:
|
82 |
-
file.write(f"{prompt}\n\n{response}")
|
83 |
-
|
84 |
-
def truncate_document(document, length):
|
85 |
-
return document[:length]
|
86 |
-
def divide_document(document, max_length):
|
87 |
-
return [document[i:i+max_length] for i in range(0, len(document), max_length)]
|
88 |
-
|
89 |
-
def get_table_download_link(file_path):
|
90 |
-
with open(file_path, 'r') as file:
|
91 |
-
try:
|
92 |
-
data = file.read()
|
93 |
-
except:
|
94 |
-
st.write('')
|
95 |
-
return file_path
|
96 |
-
b64 = base64.b64encode(data.encode()).decode()
|
97 |
-
file_name = os.path.basename(file_path)
|
98 |
-
ext = os.path.splitext(file_name)[1] # get the file extension
|
99 |
-
if ext == '.txt':
|
100 |
-
mime_type = 'text/plain'
|
101 |
-
elif ext == '.py':
|
102 |
-
mime_type = 'text/plain'
|
103 |
-
elif ext == '.xlsx':
|
104 |
-
mime_type = 'text/plain'
|
105 |
-
elif ext == '.csv':
|
106 |
-
mime_type = 'text/plain'
|
107 |
-
elif ext == '.htm':
|
108 |
-
mime_type = 'text/html'
|
109 |
-
elif ext == '.md':
|
110 |
-
mime_type = 'text/markdown'
|
111 |
-
else:
|
112 |
-
mime_type = 'application/octet-stream' # general binary data type
|
113 |
-
href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
|
114 |
-
return href
|
115 |
-
|
116 |
-
def CompressXML(xml_text):
|
117 |
-
root = ET.fromstring(xml_text)
|
118 |
-
for elem in list(root.iter()):
|
119 |
-
if isinstance(elem.tag, str) and 'Comment' in elem.tag:
|
120 |
-
elem.parent.remove(elem)
|
121 |
-
return ET.tostring(root, encoding='unicode', method="xml")
|
122 |
-
|
123 |
-
def read_file_content(file,max_length):
|
124 |
-
if file.type == "application/json":
|
125 |
-
content = json.load(file)
|
126 |
-
return str(content)
|
127 |
-
elif file.type == "text/html" or file.type == "text/htm":
|
128 |
-
content = BeautifulSoup(file, "html.parser")
|
129 |
-
return content.text
|
130 |
-
elif file.type == "application/xml" or file.type == "text/xml":
|
131 |
-
tree = ET.parse(file)
|
132 |
-
root = tree.getroot()
|
133 |
-
xml = CompressXML(ET.tostring(root, encoding='unicode'))
|
134 |
-
return xml
|
135 |
-
elif file.type == "text/markdown" or file.type == "text/md":
|
136 |
-
md = mistune.create_markdown()
|
137 |
-
content = md(file.read().decode())
|
138 |
-
return content
|
139 |
-
elif file.type == "text/plain":
|
140 |
-
return file.getvalue().decode()
|
141 |
-
else:
|
142 |
-
return ""
|
143 |
-
|
144 |
-
def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
|
145 |
-
model = model_choice
|
146 |
-
conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
|
147 |
-
conversation.append({'role': 'user', 'content': prompt})
|
148 |
-
if len(document_section)>0:
|
149 |
-
conversation.append({'role': 'assistant', 'content': document_section})
|
150 |
-
|
151 |
-
start_time = time.time()
|
152 |
-
report = []
|
153 |
-
res_box = st.empty()
|
154 |
-
collected_chunks = []
|
155 |
-
collected_messages = []
|
156 |
-
|
157 |
-
for chunk in openai.ChatCompletion.create(
|
158 |
-
model='gpt-3.5-turbo',
|
159 |
-
messages=conversation,
|
160 |
-
temperature=0.5,
|
161 |
-
stream=True
|
162 |
-
):
|
163 |
-
|
164 |
-
collected_chunks.append(chunk) # save the event response
|
165 |
-
chunk_message = chunk['choices'][0]['delta'] # extract the message
|
166 |
-
collected_messages.append(chunk_message) # save the message
|
167 |
-
|
168 |
-
content=chunk["choices"][0].get("delta",{}).get("content")
|
169 |
-
|
170 |
-
try:
|
171 |
-
report.append(content)
|
172 |
-
if len(content) > 0:
|
173 |
-
result = "".join(report).strip()
|
174 |
-
#result = result.replace("\n", "")
|
175 |
-
res_box.markdown(f'*{result}*')
|
176 |
-
except:
|
177 |
-
st.write(' ')
|
178 |
-
|
179 |
-
full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
|
180 |
-
st.write("Elapsed time:")
|
181 |
-
st.write(time.time() - start_time)
|
182 |
-
return full_reply_content
|
183 |
-
|
184 |
-
def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
|
185 |
-
conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
|
186 |
-
conversation.append({'role': 'user', 'content': prompt})
|
187 |
-
if len(file_content)>0:
|
188 |
-
conversation.append({'role': 'assistant', 'content': file_content})
|
189 |
-
response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
|
190 |
-
return response['choices'][0]['message']['content']
|
191 |
-
|
192 |
-
def extract_mime_type(file):
|
193 |
-
# Check if the input is a string
|
194 |
-
if isinstance(file, str):
|
195 |
-
pattern = r"type='(.*?)'"
|
196 |
-
match = re.search(pattern, file)
|
197 |
-
if match:
|
198 |
-
return match.group(1)
|
199 |
-
else:
|
200 |
-
raise ValueError(f"Unable to extract MIME type from {file}")
|
201 |
-
# If it's not a string, assume it's a streamlit.UploadedFile object
|
202 |
-
elif isinstance(file, streamlit.UploadedFile):
|
203 |
-
return file.type
|
204 |
-
else:
|
205 |
-
raise TypeError("Input should be a string or a streamlit.UploadedFile object")
|
206 |
-
|
207 |
-
from io import BytesIO
|
208 |
-
import re
|
209 |
-
|
210 |
-
def extract_file_extension(file):
|
211 |
-
# get the file name directly from the UploadedFile object
|
212 |
-
file_name = file.name
|
213 |
-
pattern = r".*?\.(.*?)$"
|
214 |
-
match = re.search(pattern, file_name)
|
215 |
-
if match:
|
216 |
-
return match.group(1)
|
217 |
-
else:
|
218 |
-
raise ValueError(f"Unable to extract file extension from {file_name}")
|
219 |
-
|
220 |
-
def pdf2txt(docs):
|
221 |
-
text = ""
|
222 |
-
for file in docs:
|
223 |
-
file_extension = extract_file_extension(file)
|
224 |
-
# print the file extension
|
225 |
-
st.write(f"File type extension: {file_extension}")
|
226 |
-
|
227 |
-
# read the file according to its extension
|
228 |
-
try:
|
229 |
-
if file_extension.lower() in ['py', 'txt', 'html', 'htm', 'xml', 'json']:
|
230 |
-
text += file.getvalue().decode('utf-8')
|
231 |
-
elif file_extension.lower() == 'pdf':
|
232 |
-
from PyPDF2 import PdfReader
|
233 |
-
pdf = PdfReader(BytesIO(file.getvalue()))
|
234 |
-
for page in range(len(pdf.pages)):
|
235 |
-
text += pdf.pages[page].extract_text() # new PyPDF2 syntax
|
236 |
-
except Exception as e:
|
237 |
-
st.write(f"Error processing file {file.name}: {e}")
|
238 |
-
|
239 |
-
return text
|
240 |
-
|
241 |
-
def pdf2txt_old(pdf_docs):
|
242 |
-
st.write(pdf_docs)
|
243 |
-
for file in pdf_docs:
|
244 |
-
mime_type = extract_mime_type(file)
|
245 |
-
st.write(f"MIME type of file: {mime_type}")
|
246 |
-
|
247 |
-
text = ""
|
248 |
-
for pdf in pdf_docs:
|
249 |
-
pdf_reader = PdfReader(pdf)
|
250 |
-
for page in pdf_reader.pages:
|
251 |
-
text += page.extract_text()
|
252 |
-
return text
|
253 |
-
|
254 |
-
def txt2chunks(text):
|
255 |
-
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
|
256 |
-
return text_splitter.split_text(text)
|
257 |
-
|
258 |
-
def vector_store(text_chunks):
|
259 |
-
key = os.getenv('OPENAI_API_KEY')
|
260 |
-
embeddings = OpenAIEmbeddings(openai_api_key=key)
|
261 |
-
return FAISS.from_texts(texts=text_chunks, embedding=embeddings)
|
262 |
-
|
263 |
-
def get_chain(vectorstore):
|
264 |
-
llm = ChatOpenAI()
|
265 |
-
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
|
266 |
-
return ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory)
|
267 |
-
|
268 |
-
def process_user_input(user_question):
|
269 |
-
response = st.session_state.conversation({'question': user_question})
|
270 |
-
st.session_state.chat_history = response['chat_history']
|
271 |
-
for i, message in enumerate(st.session_state.chat_history):
|
272 |
-
template = user_template if i % 2 == 0 else bot_template
|
273 |
-
st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
|
274 |
-
# Save file output from PDF query results
|
275 |
-
filename = generate_filename(user_question, 'txt')
|
276 |
-
create_file(filename, user_question, message.content)
|
277 |
-
|
278 |
-
#st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
|
279 |
-
|
280 |
-
def divide_prompt(prompt, max_length):
|
281 |
-
words = prompt.split()
|
282 |
-
chunks = []
|
283 |
-
current_chunk = []
|
284 |
-
current_length = 0
|
285 |
-
for word in words:
|
286 |
-
if len(word) + current_length <= max_length:
|
287 |
-
current_length += len(word) + 1 # Adding 1 to account for spaces
|
288 |
-
current_chunk.append(word)
|
289 |
-
else:
|
290 |
-
chunks.append(' '.join(current_chunk))
|
291 |
-
current_chunk = [word]
|
292 |
-
current_length = len(word)
|
293 |
-
chunks.append(' '.join(current_chunk)) # Append the final chunk
|
294 |
-
return chunks
|
295 |
-
|
296 |
-
def main():
|
297 |
-
# Sidebar and global
|
298 |
-
openai.api_key = os.getenv('OPENAI_API_KEY')
|
299 |
-
st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide")
|
300 |
-
|
301 |
-
# File type for output, model choice
|
302 |
-
menu = ["txt", "htm", "xlsx", "csv", "md", "py"] #619
|
303 |
-
choice = st.sidebar.selectbox("Output File Type:", menu)
|
304 |
-
model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
|
305 |
-
|
306 |
-
# Audio, transcribe, GPT:
|
307 |
-
filename = save_and_play_audio(audio_recorder)
|
308 |
-
if filename is not None:
|
309 |
-
transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
|
310 |
-
st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
|
311 |
-
filename=None # since transcription is finished next time just use the saved transcript
|
312 |
-
|
313 |
-
# prompt interfaces
|
314 |
-
user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
|
315 |
-
|
316 |
-
# file section interface for prompts against large documents as context
|
317 |
-
collength, colupload = st.columns([2,3]) # adjust the ratio as needed
|
318 |
-
with collength:
|
319 |
-
max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
|
320 |
-
with colupload:
|
321 |
-
uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx","csv","html", "htm", "md", "txt"])
|
322 |
-
|
323 |
-
# Document section chat
|
324 |
-
document_sections = deque()
|
325 |
-
document_responses = {}
|
326 |
-
if uploaded_file is not None:
|
327 |
-
file_content = read_file_content(uploaded_file, max_length)
|
328 |
-
document_sections.extend(divide_document(file_content, max_length))
|
329 |
-
if len(document_sections) > 0:
|
330 |
-
if st.button("👁️ View Upload"):
|
331 |
-
st.markdown("**Sections of the uploaded file:**")
|
332 |
-
for i, section in enumerate(list(document_sections)):
|
333 |
-
st.markdown(f"**Section {i+1}**\n{section}")
|
334 |
-
st.markdown("**Chat with the model:**")
|
335 |
-
for i, section in enumerate(list(document_sections)):
|
336 |
-
if i in document_responses:
|
337 |
-
st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
|
338 |
-
else:
|
339 |
-
if st.button(f"Chat about Section {i+1}"):
|
340 |
-
st.write('Reasoning with your inputs...')
|
341 |
-
response = chat_with_model(user_prompt, section, model_choice) # *************************************
|
342 |
-
st.write('Response:')
|
343 |
-
st.write(response)
|
344 |
-
document_responses[i] = response
|
345 |
-
filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
|
346 |
-
create_file(filename, user_prompt, response)
|
347 |
-
st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
|
348 |
-
|
349 |
-
if st.button('💬 Chat'):
|
350 |
-
st.write('Reasoning with your inputs...')
|
351 |
-
|
352 |
-
#response = chat_with_model(user_prompt, ''.join(list(document_sections,)), model_choice) # *************************************
|
353 |
-
|
354 |
-
# Divide the user_prompt into smaller sections
|
355 |
-
user_prompt_sections = divide_prompt(user_prompt, max_length)
|
356 |
-
full_response = ''
|
357 |
-
for prompt_section in user_prompt_sections:
|
358 |
-
# Process each section with the model
|
359 |
-
response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
|
360 |
-
full_response += response + '\n' # Combine the responses
|
361 |
-
|
362 |
-
#st.write('Response:')
|
363 |
-
#st.write(full_response)
|
364 |
-
|
365 |
-
response = full_response
|
366 |
-
st.write('Response:')
|
367 |
-
st.write(response)
|
368 |
-
|
369 |
-
filename = generate_filename(user_prompt, choice)
|
370 |
-
create_file(filename, user_prompt, response)
|
371 |
-
st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
|
372 |
-
|
373 |
-
all_files = glob.glob("*.*")
|
374 |
-
all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names
|
375 |
-
all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
|
376 |
-
|
377 |
-
# sidebar of files
|
378 |
-
file_contents=''
|
379 |
-
next_action=''
|
380 |
-
for file in all_files:
|
381 |
-
col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1]) # adjust the ratio as needed
|
382 |
-
with col1:
|
383 |
-
if st.button("🌐", key="md_"+file): # md emoji button
|
384 |
-
with open(file, 'r') as f:
|
385 |
-
file_contents = f.read()
|
386 |
-
next_action='md'
|
387 |
-
with col2:
|
388 |
-
st.markdown(get_table_download_link(file), unsafe_allow_html=True)
|
389 |
-
with col3:
|
390 |
-
if st.button("📂", key="open_"+file): # open emoji button
|
391 |
-
with open(file, 'r') as f:
|
392 |
-
file_contents = f.read()
|
393 |
-
next_action='open'
|
394 |
-
with col4:
|
395 |
-
if st.button("🔍", key="read_"+file): # search emoji button
|
396 |
-
with open(file, 'r') as f:
|
397 |
-
file_contents = f.read()
|
398 |
-
next_action='search'
|
399 |
-
with col5:
|
400 |
-
if st.button("🗑", key="delete_"+file):
|
401 |
-
os.remove(file)
|
402 |
-
st.experimental_rerun()
|
403 |
-
|
404 |
-
if len(file_contents) > 0:
|
405 |
-
if next_action=='open':
|
406 |
-
file_content_area = st.text_area("File Contents:", file_contents, height=500)
|
407 |
-
if next_action=='md':
|
408 |
-
st.markdown(file_contents)
|
409 |
-
if next_action=='search':
|
410 |
-
file_content_area = st.text_area("File Contents:", file_contents, height=500)
|
411 |
-
st.write('Reasoning with your inputs...')
|
412 |
-
response = chat_with_model(user_prompt, file_contents, model_choice)
|
413 |
-
filename = generate_filename(file_contents, choice)
|
414 |
-
create_file(filename, file_contents, response)
|
415 |
-
|
416 |
-
st.experimental_rerun()
|
417 |
-
#st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
|
418 |
-
|
419 |
-
if __name__ == "__main__":
|
420 |
-
main()
|
421 |
-
|
422 |
-
load_dotenv()
|
423 |
-
st.write(css, unsafe_allow_html=True)
|
424 |
-
|
425 |
-
st.header("Chat with documents :books:")
|
426 |
-
user_question = st.text_input("Ask a question about your documents:")
|
427 |
-
if user_question:
|
428 |
-
process_user_input(user_question)
|
429 |
-
|
430 |
-
with st.sidebar:
|
431 |
-
st.subheader("Your documents")
|
432 |
-
docs = st.file_uploader("import documents", accept_multiple_files=True)
|
433 |
-
with st.spinner("Processing"):
|
434 |
-
raw = pdf2txt(docs)
|
435 |
-
if len(raw) > 0:
|
436 |
-
length = str(len(raw))
|
437 |
-
text_chunks = txt2chunks(raw)
|
438 |
-
vectorstore = vector_store(text_chunks)
|
439 |
-
st.session_state.conversation = get_chain(vectorstore)
|
440 |
-
st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing
|
441 |
-
filename = generate_filename(raw, 'txt')
|
442 |
-
create_file(filename, raw, '')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/_base_/default_runtime.py
DELETED
@@ -1,49 +0,0 @@
|
|
1 |
-
default_scope = 'mmpose'
|
2 |
-
|
3 |
-
# hooks
|
4 |
-
default_hooks = dict(
|
5 |
-
timer=dict(type='IterTimerHook'),
|
6 |
-
logger=dict(type='LoggerHook', interval=50),
|
7 |
-
param_scheduler=dict(type='ParamSchedulerHook'),
|
8 |
-
checkpoint=dict(type='CheckpointHook', interval=10),
|
9 |
-
sampler_seed=dict(type='DistSamplerSeedHook'),
|
10 |
-
visualization=dict(type='PoseVisualizationHook', enable=False),
|
11 |
-
)
|
12 |
-
|
13 |
-
# custom hooks
|
14 |
-
custom_hooks = [
|
15 |
-
# Synchronize model buffers such as running_mean and running_var in BN
|
16 |
-
# at the end of each epoch
|
17 |
-
dict(type='SyncBuffersHook')
|
18 |
-
]
|
19 |
-
|
20 |
-
# multi-processing backend
|
21 |
-
env_cfg = dict(
|
22 |
-
cudnn_benchmark=False,
|
23 |
-
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
|
24 |
-
dist_cfg=dict(backend='nccl'),
|
25 |
-
)
|
26 |
-
|
27 |
-
# visualizer
|
28 |
-
vis_backends = [
|
29 |
-
dict(type='LocalVisBackend'),
|
30 |
-
# dict(type='TensorboardVisBackend'),
|
31 |
-
# dict(type='WandbVisBackend'),
|
32 |
-
]
|
33 |
-
visualizer = dict(
|
34 |
-
type='PoseLocalVisualizer', vis_backends=vis_backends, name='visualizer')
|
35 |
-
|
36 |
-
# logger
|
37 |
-
log_processor = dict(
|
38 |
-
type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
|
39 |
-
log_level = 'INFO'
|
40 |
-
load_from = None
|
41 |
-
resume = False
|
42 |
-
|
43 |
-
# file I/O backend
|
44 |
-
backend_args = dict(backend='local')
|
45 |
-
|
46 |
-
# training/validation/testing progress
|
47 |
-
train_cfg = dict(by_epoch=True)
|
48 |
-
val_cfg = dict()
|
49 |
-
test_cfg = dict()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aditya9790/yolo7-object-tracking/app.py
DELETED
@@ -1,293 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import os
|
3 |
-
|
4 |
-
import argparse
|
5 |
-
import time
|
6 |
-
from pathlib import Path
|
7 |
-
|
8 |
-
import cv2
|
9 |
-
import torch
|
10 |
-
import torch.backends.cudnn as cudnn
|
11 |
-
from numpy import random
|
12 |
-
|
13 |
-
from models.experimental import attempt_load
|
14 |
-
from utils.datasets import LoadStreams, LoadImages
|
15 |
-
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
|
16 |
-
scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
|
17 |
-
from utils.plots import plot_one_box
|
18 |
-
from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
|
19 |
-
from PIL import Image
|
20 |
-
|
21 |
-
from sort import *
|
22 |
-
|
23 |
-
from huggingface_hub import hf_hub_download
|
24 |
-
|
25 |
-
def load_model(model_name):
|
26 |
-
model_path = hf_hub_download(repo_id=f"Yolov7/{model_name}", filename=f"{model_name}.pt")
|
27 |
-
|
28 |
-
return model_path
|
29 |
-
|
30 |
-
|
31 |
-
model_names = ["yolov7"]
|
32 |
-
|
33 |
-
models = {model_name: load_model(model_name) for model_name in model_names}
|
34 |
-
|
35 |
-
##################################
|
36 |
-
# """Function to Draw Bounding boxes"""
|
37 |
-
def draw_boxes(img, bbox, identities=None, categories=None, confidences = None, names=None, colors = None):
|
38 |
-
for i, box in enumerate(bbox):
|
39 |
-
x1, y1, x2, y2 = [int(i) for i in box]
|
40 |
-
tl = opt.thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
|
41 |
-
|
42 |
-
cat = int(categories[i]) if categories is not None else 0
|
43 |
-
id = int(identities[i]) if identities is not None else 0
|
44 |
-
# conf = confidences[i] if confidences is not None else 0
|
45 |
-
|
46 |
-
color = colors[cat]
|
47 |
-
|
48 |
-
if not opt.nobbox:
|
49 |
-
cv2.rectangle(img, (x1, y1), (x2, y2), color, tl)
|
50 |
-
|
51 |
-
if not opt.nolabel:
|
52 |
-
label = str(id) + ":"+ names[cat] if identities is not None else f'{names[cat]} {confidences[i]:.2f}'
|
53 |
-
tf = max(tl - 1, 1) # font thickness
|
54 |
-
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
|
55 |
-
c2 = x1 + t_size[0], y1 - t_size[1] - 3
|
56 |
-
cv2.rectangle(img, (x1, y1), c2, color, -1, cv2.LINE_AA) # filled
|
57 |
-
cv2.putText(img, label, (x1, y1 - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
|
58 |
-
|
59 |
-
|
60 |
-
return img
|
61 |
-
##################################
|
62 |
-
|
63 |
-
|
64 |
-
def detect(save_img=True):
|
65 |
-
parser = argparse.ArgumentParser()
|
66 |
-
parser.add_argument('--weights', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)')
|
67 |
-
parser.add_argument('--source', type=str, default='inference/images', help='source') # file/folder, 0 for webcam
|
68 |
-
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
|
69 |
-
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
|
70 |
-
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
|
71 |
-
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
72 |
-
parser.add_argument('--view-img', action='store_true', help='display results')
|
73 |
-
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
|
74 |
-
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
|
75 |
-
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
|
76 |
-
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
|
77 |
-
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
|
78 |
-
parser.add_argument('--augment', action='store_true', help='augmented inference')
|
79 |
-
parser.add_argument('--update', action='store_true', help='update all models')
|
80 |
-
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
|
81 |
-
parser.add_argument('--name', default='exp', help='save results to project/name')
|
82 |
-
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
83 |
-
parser.add_argument('--no-trace', action='store_true', help='don`t trace model')
|
84 |
-
|
85 |
-
parser.add_argument('--track', action='store_true', help='run tracking')
|
86 |
-
parser.add_argument('--show-track', action='store_true', help='show tracked path')
|
87 |
-
parser.add_argument('--show-fps', action='store_true', help='show fps')
|
88 |
-
parser.add_argument('--thickness', type=int, default=2, help='bounding box and font size thickness')
|
89 |
-
parser.add_argument('--seed', type=int, default=1, help='random seed to control bbox colors')
|
90 |
-
parser.add_argument('--nobbox', action='store_true', help='don`t show bounding box')
|
91 |
-
parser.add_argument('--nolabel', action='store_true', help='don`t show label')
|
92 |
-
parser.add_argument('--unique-track-color', action='store_true', help='show each track in unique color')
|
93 |
-
|
94 |
-
opt = parser.parse_args()
|
95 |
-
np.random.seed(opt.seed)
|
96 |
-
|
97 |
-
sort_tracker = Sort(max_age=5,
|
98 |
-
min_hits=2,
|
99 |
-
iou_threshold=0.2)
|
100 |
-
|
101 |
-
source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
|
102 |
-
save_img = not opt.nosave and not source.endswith('.txt') # save inference images
|
103 |
-
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
|
104 |
-
('rtsp://', 'rtmp://', 'http://', 'https://'))
|
105 |
-
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
|
106 |
-
if not opt.nosave:
|
107 |
-
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
|
108 |
-
|
109 |
-
# Initialize
|
110 |
-
set_logging()
|
111 |
-
device = select_device(opt.device)
|
112 |
-
half = device.type != 'cpu' # half precision only supported on CUDA
|
113 |
-
|
114 |
-
# Load model
|
115 |
-
model = attempt_load(weights, map_location=device) # load FP32 model
|
116 |
-
stride = int(model.stride.max()) # model stride
|
117 |
-
imgsz = check_img_size(imgsz, s=stride) # check img_size
|
118 |
-
|
119 |
-
if trace:
|
120 |
-
model = TracedModel(model, device, opt.img_size)
|
121 |
-
|
122 |
-
if half:
|
123 |
-
model.half() # to FP16
|
124 |
-
|
125 |
-
# Second-stage classifier
|
126 |
-
classify = False
|
127 |
-
if classify:
|
128 |
-
modelc = load_classifier(name='resnet101', n=2) # initialize
|
129 |
-
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
|
130 |
-
|
131 |
-
# Set Dataloader
|
132 |
-
vid_path, vid_writer = None, None
|
133 |
-
if webcam:
|
134 |
-
view_img = check_imshow()
|
135 |
-
cudnn.benchmark = True # set True to speed up constant image size inference
|
136 |
-
dataset = LoadStreams(source, img_size=imgsz, stride=stride)
|
137 |
-
else:
|
138 |
-
dataset = LoadImages(source, img_size=imgsz, stride=stride)
|
139 |
-
|
140 |
-
# Get names and colors
|
141 |
-
names = model.module.names if hasattr(model, 'module') else model.names
|
142 |
-
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
|
143 |
-
|
144 |
-
# Run inference
|
145 |
-
if device.type != 'cpu':
|
146 |
-
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
|
147 |
-
old_img_w = old_img_h = imgsz
|
148 |
-
old_img_b = 1
|
149 |
-
|
150 |
-
t0 = time.time()
|
151 |
-
###################################
|
152 |
-
startTime = 0
|
153 |
-
###################################
|
154 |
-
for path, img, im0s, vid_cap in dataset:
|
155 |
-
img = torch.from_numpy(img).to(device)
|
156 |
-
img = img.half() if half else img.float() # uint8 to fp16/32
|
157 |
-
img /= 255.0 # 0 - 255 to 0.0 - 1.0
|
158 |
-
if img.ndimension() == 3:
|
159 |
-
img = img.unsqueeze(0)
|
160 |
-
|
161 |
-
# Warmup
|
162 |
-
if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
|
163 |
-
old_img_b = img.shape[0]
|
164 |
-
old_img_h = img.shape[2]
|
165 |
-
old_img_w = img.shape[3]
|
166 |
-
for i in range(3):
|
167 |
-
model(img, augment=opt.augment)[0]
|
168 |
-
|
169 |
-
# Inference
|
170 |
-
t1 = time_synchronized()
|
171 |
-
pred = model(img, augment=opt.augment)[0]
|
172 |
-
t2 = time_synchronized()
|
173 |
-
|
174 |
-
# Apply NMS
|
175 |
-
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
|
176 |
-
t3 = time_synchronized()
|
177 |
-
|
178 |
-
# Apply Classifier
|
179 |
-
if classify:
|
180 |
-
pred = apply_classifier(pred, modelc, img, im0s)
|
181 |
-
|
182 |
-
# Process detections
|
183 |
-
for i, det in enumerate(pred): # detections per image
|
184 |
-
if webcam: # batch_size >= 1
|
185 |
-
p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
|
186 |
-
else:
|
187 |
-
p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
|
188 |
-
|
189 |
-
p = Path(p) # to Path
|
190 |
-
save_path = str(save_dir / p.name) # img.jpg
|
191 |
-
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
|
192 |
-
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
|
193 |
-
if len(det):
|
194 |
-
# Rescale boxes from img_size to im0 size
|
195 |
-
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
|
196 |
-
|
197 |
-
# Print results
|
198 |
-
for c in det[:, -1].unique():
|
199 |
-
n = (det[:, -1] == c).sum() # detections per class
|
200 |
-
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
|
201 |
-
|
202 |
-
dets_to_sort = np.empty((0,6))
|
203 |
-
# NOTE: We send in detected object class too
|
204 |
-
for x1,y1,x2,y2,conf,detclass in det.cpu().detach().numpy():
|
205 |
-
dets_to_sort = np.vstack((dets_to_sort,
|
206 |
-
np.array([x1, y1, x2, y2, conf, detclass])))
|
207 |
-
|
208 |
-
|
209 |
-
if opt.track:
|
210 |
-
|
211 |
-
tracked_dets = sort_tracker.update(dets_to_sort, opt.unique_track_color)
|
212 |
-
tracks =sort_tracker.getTrackers()
|
213 |
-
|
214 |
-
# draw boxes for visualization
|
215 |
-
if len(tracked_dets)>0:
|
216 |
-
bbox_xyxy = tracked_dets[:,:4]
|
217 |
-
identities = tracked_dets[:, 8]
|
218 |
-
categories = tracked_dets[:, 4]
|
219 |
-
confidences = None
|
220 |
-
|
221 |
-
if opt.show_track:
|
222 |
-
#loop over tracks
|
223 |
-
for t, track in enumerate(tracks):
|
224 |
-
|
225 |
-
track_color = colors[int(track.detclass)] if not opt.unique_track_color else sort_tracker.color_list[t]
|
226 |
-
|
227 |
-
[cv2.line(im0, (int(track.centroidarr[i][0]),
|
228 |
-
int(track.centroidarr[i][1])),
|
229 |
-
(int(track.centroidarr[i+1][0]),
|
230 |
-
int(track.centroidarr[i+1][1])),
|
231 |
-
track_color, thickness=opt.thickness)
|
232 |
-
for i,_ in enumerate(track.centroidarr)
|
233 |
-
if i < len(track.centroidarr)-1 ]
|
234 |
-
else:
|
235 |
-
bbox_xyxy = dets_to_sort[:,:4]
|
236 |
-
identities = None
|
237 |
-
categories = dets_to_sort[:, 5]
|
238 |
-
confidences = dets_to_sort[:, 4]
|
239 |
-
|
240 |
-
im0 = draw_boxes(im0, bbox_xyxy, identities, categories, confidences, names, colors)
|
241 |
-
|
242 |
-
# Print time (inference + NMS)
|
243 |
-
print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
|
244 |
-
|
245 |
-
# Stream results
|
246 |
-
######################################################
|
247 |
-
if dataset.mode != 'image' and opt.show_fps:
|
248 |
-
currentTime = time.time()
|
249 |
-
|
250 |
-
fps = 1/(currentTime - startTime)
|
251 |
-
startTime = currentTime
|
252 |
-
cv2.putText(im0, "FPS: " + str(int(fps)), (20, 70), cv2.FONT_HERSHEY_PLAIN, 2, (0,255,0),2)
|
253 |
-
|
254 |
-
#######################################################
|
255 |
-
if view_img:
|
256 |
-
cv2.imshow(str(p), im0)
|
257 |
-
cv2.waitKey(1) # 1 millisecond
|
258 |
-
|
259 |
-
# Save results (image with detections)
|
260 |
-
if save_img:
|
261 |
-
if dataset.mode == 'image':
|
262 |
-
cv2.imwrite(save_path, im0)
|
263 |
-
print(f" The image with the result is saved in: {save_path}")
|
264 |
-
else: # 'video' or 'stream'
|
265 |
-
if vid_path != save_path: # new video
|
266 |
-
vid_path = save_path
|
267 |
-
if isinstance(vid_writer, cv2.VideoWriter):
|
268 |
-
vid_writer.release() # release previous video writer
|
269 |
-
if vid_cap: # video
|
270 |
-
fps = vid_cap.get(cv2.CAP_PROP_FPS)
|
271 |
-
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
272 |
-
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
273 |
-
else: # stream
|
274 |
-
fps, w, h = 30, im0.shape[1], im0.shape[0]
|
275 |
-
save_path += '.mp4'
|
276 |
-
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
|
277 |
-
vid_writer.write(im0)
|
278 |
-
|
279 |
-
if save_txt or save_img:
|
280 |
-
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
|
281 |
-
#print(f"Results saved to {save_dir}{s}")
|
282 |
-
|
283 |
-
print(f'Done. ({time.time() - t0:.3f}s)')
|
284 |
-
return img
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
desc = "demo for <a href='https://github.com/WongKinYiu/yolov7' style='text-decoration: underline' target='_blank'>WongKinYiu/yolov7</a> Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors"
|
289 |
-
gr.Interface(detect,
|
290 |
-
inputs = [gr.Video(format="mp4")],
|
291 |
-
outputs = gr.Video(format="mp4"),
|
292 |
-
title="Yolov7",description=desc).launch()
|
293 |
-
# gr.Interface(detect,[gr.Image(type="pil"),gr.Dropdown(choices=model_names)], gr.Image(type="pil"),title="Yolov7",examples=[["horses.jpeg", "yolov7"]],description="demo for <a href='https://github.com/WongKinYiu/yolov7' style='text-decoration: underline' target='_blank'>WongKinYiu/yolov7</a> Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors").launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aer0xander/sd-to-diffusers/utils.py
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
def is_google_colab():
|
2 |
-
try:
|
3 |
-
import google.colab
|
4 |
-
return True
|
5 |
-
except:
|
6 |
-
return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/torch_utils/ops/conv2d_gradfix.py
DELETED
@@ -1,225 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Custom replacement for `torch.nn.functional.conv2d` that supports
|
10 |
-
arbitrarily high order gradients with zero performance penalty."""
|
11 |
-
|
12 |
-
import contextlib
|
13 |
-
import torch
|
14 |
-
|
15 |
-
# pylint: disable=redefined-builtin
|
16 |
-
# pylint: disable=arguments-differ
|
17 |
-
# pylint: disable=protected-access
|
18 |
-
|
19 |
-
# ----------------------------------------------------------------------------
|
20 |
-
|
21 |
-
# Enable the custom op by setting this to true.
|
22 |
-
enabled = False
|
23 |
-
# Forcefully disable computation of gradients with respect to the weights.
|
24 |
-
weight_gradients_disabled = False
|
25 |
-
|
26 |
-
|
27 |
-
@contextlib.contextmanager
|
28 |
-
def no_weight_gradients(disable=True):
|
29 |
-
global weight_gradients_disabled
|
30 |
-
old = weight_gradients_disabled
|
31 |
-
if disable:
|
32 |
-
weight_gradients_disabled = True
|
33 |
-
yield
|
34 |
-
weight_gradients_disabled = old
|
35 |
-
|
36 |
-
# ----------------------------------------------------------------------------
|
37 |
-
|
38 |
-
|
39 |
-
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
|
40 |
-
if _should_use_custom_op(input):
|
41 |
-
return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias)
|
42 |
-
return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
|
43 |
-
|
44 |
-
|
45 |
-
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
|
46 |
-
if _should_use_custom_op(input):
|
47 |
-
return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias)
|
48 |
-
return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
|
49 |
-
|
50 |
-
# ----------------------------------------------------------------------------
|
51 |
-
|
52 |
-
|
53 |
-
def _should_use_custom_op(input):
|
54 |
-
assert isinstance(input, torch.Tensor)
|
55 |
-
if (not enabled) or (not torch.backends.cudnn.enabled):
|
56 |
-
return False
|
57 |
-
if input.device.type != 'cuda':
|
58 |
-
return False
|
59 |
-
return True
|
60 |
-
|
61 |
-
|
62 |
-
def _tuple_of_ints(xs, ndim):
|
63 |
-
xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
|
64 |
-
assert len(xs) == ndim
|
65 |
-
assert all(isinstance(x, int) for x in xs)
|
66 |
-
return xs
|
67 |
-
|
68 |
-
# ----------------------------------------------------------------------------
|
69 |
-
|
70 |
-
|
71 |
-
_conv2d_gradfix_cache = dict()
|
72 |
-
_null_tensor = torch.empty([0])
|
73 |
-
|
74 |
-
|
75 |
-
def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
|
76 |
-
# Parse arguments.
|
77 |
-
ndim = 2
|
78 |
-
weight_shape = tuple(weight_shape)
|
79 |
-
stride = _tuple_of_ints(stride, ndim)
|
80 |
-
padding = _tuple_of_ints(padding, ndim)
|
81 |
-
output_padding = _tuple_of_ints(output_padding, ndim)
|
82 |
-
dilation = _tuple_of_ints(dilation, ndim)
|
83 |
-
|
84 |
-
# Lookup from cache.
|
85 |
-
key = (transpose, weight_shape, stride, padding,
|
86 |
-
output_padding, dilation, groups)
|
87 |
-
if key in _conv2d_gradfix_cache:
|
88 |
-
return _conv2d_gradfix_cache[key]
|
89 |
-
|
90 |
-
# Validate arguments.
|
91 |
-
assert groups >= 1
|
92 |
-
assert len(weight_shape) == ndim + 2
|
93 |
-
assert all(stride[i] >= 1 for i in range(ndim))
|
94 |
-
assert all(padding[i] >= 0 for i in range(ndim))
|
95 |
-
assert all(dilation[i] >= 0 for i in range(ndim))
|
96 |
-
if not transpose:
|
97 |
-
assert all(output_padding[i] == 0 for i in range(ndim))
|
98 |
-
else: # transpose
|
99 |
-
assert all(0 <= output_padding[i] < max(
|
100 |
-
stride[i], dilation[i]) for i in range(ndim))
|
101 |
-
|
102 |
-
# Helpers.
|
103 |
-
common_kwargs = dict(stride=stride, padding=padding,
|
104 |
-
dilation=dilation, groups=groups)
|
105 |
-
|
106 |
-
def calc_output_padding(input_shape, output_shape):
|
107 |
-
if transpose:
|
108 |
-
return [0, 0]
|
109 |
-
return [
|
110 |
-
input_shape[i + 2]
|
111 |
-
- (output_shape[i + 2] - 1) * stride[i]
|
112 |
-
- (1 - 2 * padding[i])
|
113 |
-
- dilation[i] * (weight_shape[i + 2] - 1)
|
114 |
-
for i in range(ndim)
|
115 |
-
]
|
116 |
-
|
117 |
-
# Forward & backward.
|
118 |
-
class Conv2d(torch.autograd.Function):
|
119 |
-
@staticmethod
|
120 |
-
def forward(ctx, input, weight, bias):
|
121 |
-
assert weight.shape == weight_shape
|
122 |
-
ctx.save_for_backward(
|
123 |
-
input if weight.requires_grad else _null_tensor,
|
124 |
-
weight if input.requires_grad else _null_tensor,
|
125 |
-
)
|
126 |
-
ctx.input_shape = input.shape
|
127 |
-
|
128 |
-
# Simple 1x1 convolution => cuBLAS (only on Volta, not on Ampere).
|
129 |
-
if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0) and torch.cuda.get_device_capability(input.device) < (8, 0):
|
130 |
-
a = weight.reshape(
|
131 |
-
groups, weight_shape[0] // groups, weight_shape[1])
|
132 |
-
b = input.reshape(
|
133 |
-
input.shape[0], groups, input.shape[1] // groups, -1)
|
134 |
-
c = (a.transpose(1, 2) if transpose else a) @ b.permute(1,
|
135 |
-
2, 0, 3).flatten(2)
|
136 |
-
c = c.reshape(-1, input.shape[0],
|
137 |
-
*input.shape[2:]).transpose(0, 1)
|
138 |
-
c = c if bias is None else c + \
|
139 |
-
bias.unsqueeze(0).unsqueeze(2).unsqueeze(3)
|
140 |
-
return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format))
|
141 |
-
|
142 |
-
# General case => cuDNN.
|
143 |
-
if transpose:
|
144 |
-
return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
|
145 |
-
return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
|
146 |
-
|
147 |
-
@staticmethod
|
148 |
-
def backward(ctx, grad_output):
|
149 |
-
input, weight = ctx.saved_tensors
|
150 |
-
input_shape = ctx.input_shape
|
151 |
-
grad_input = None
|
152 |
-
grad_weight = None
|
153 |
-
grad_bias = None
|
154 |
-
|
155 |
-
if ctx.needs_input_grad[0]:
|
156 |
-
p = calc_output_padding(
|
157 |
-
input_shape=input_shape, output_shape=grad_output.shape)
|
158 |
-
op = _conv2d_gradfix(transpose=(
|
159 |
-
not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs)
|
160 |
-
grad_input = op.apply(grad_output, weight, None)
|
161 |
-
assert grad_input.shape == input_shape
|
162 |
-
|
163 |
-
if ctx.needs_input_grad[1] and not weight_gradients_disabled:
|
164 |
-
grad_weight = Conv2dGradWeight.apply(grad_output, input)
|
165 |
-
assert grad_weight.shape == weight_shape
|
166 |
-
|
167 |
-
if ctx.needs_input_grad[2]:
|
168 |
-
grad_bias = grad_output.sum([0, 2, 3])
|
169 |
-
|
170 |
-
return grad_input, grad_weight, grad_bias
|
171 |
-
|
172 |
-
# Gradient with respect to the weights.
|
173 |
-
class Conv2dGradWeight(torch.autograd.Function):
|
174 |
-
@staticmethod
|
175 |
-
def forward(ctx, grad_output, input):
|
176 |
-
ctx.save_for_backward(
|
177 |
-
grad_output if input.requires_grad else _null_tensor,
|
178 |
-
input if grad_output.requires_grad else _null_tensor,
|
179 |
-
)
|
180 |
-
ctx.grad_output_shape = grad_output.shape
|
181 |
-
ctx.input_shape = input.shape
|
182 |
-
|
183 |
-
# Simple 1x1 convolution => cuBLAS (on both Volta and Ampere).
|
184 |
-
if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0):
|
185 |
-
a = grad_output.reshape(
|
186 |
-
grad_output.shape[0], groups, grad_output.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2)
|
187 |
-
b = input.reshape(
|
188 |
-
input.shape[0], groups, input.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2)
|
189 |
-
c = (b @ a.transpose(1, 2) if transpose else a @
|
190 |
-
b.transpose(1, 2)).reshape(weight_shape)
|
191 |
-
return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format))
|
192 |
-
|
193 |
-
# General case => cuDNN.
|
194 |
-
name = 'aten::cudnn_convolution_transpose_backward_weight' if transpose else 'aten::cudnn_convolution_backward_weight'
|
195 |
-
flags = [torch.backends.cudnn.benchmark,
|
196 |
-
torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
|
197 |
-
return torch._C._jit_get_operation(name)(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
|
198 |
-
|
199 |
-
@staticmethod
|
200 |
-
def backward(ctx, grad2_grad_weight):
|
201 |
-
grad_output, input = ctx.saved_tensors
|
202 |
-
grad_output_shape = ctx.grad_output_shape
|
203 |
-
input_shape = ctx.input_shape
|
204 |
-
grad2_grad_output = None
|
205 |
-
grad2_input = None
|
206 |
-
|
207 |
-
if ctx.needs_input_grad[0]:
|
208 |
-
grad2_grad_output = Conv2d.apply(
|
209 |
-
input, grad2_grad_weight, None)
|
210 |
-
assert grad2_grad_output.shape == grad_output_shape
|
211 |
-
|
212 |
-
if ctx.needs_input_grad[1]:
|
213 |
-
p = calc_output_padding(
|
214 |
-
input_shape=input_shape, output_shape=grad_output_shape)
|
215 |
-
op = _conv2d_gradfix(transpose=(
|
216 |
-
not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs)
|
217 |
-
grad2_input = op.apply(grad_output, grad2_grad_weight, None)
|
218 |
-
assert grad2_input.shape == input_shape
|
219 |
-
|
220 |
-
return grad2_grad_output, grad2_input
|
221 |
-
|
222 |
-
_conv2d_gradfix_cache[key] = Conv2d
|
223 |
-
return Conv2d
|
224 |
-
|
225 |
-
# ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/viz/drag_widget.py
DELETED
@@ -1,173 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import torch
|
3 |
-
import numpy as np
|
4 |
-
import imgui
|
5 |
-
import dnnlib
|
6 |
-
from gui_utils import imgui_utils
|
7 |
-
|
8 |
-
# ----------------------------------------------------------------------------
|
9 |
-
|
10 |
-
|
11 |
-
class DragWidget:
|
12 |
-
def __init__(self, viz):
|
13 |
-
self.viz = viz
|
14 |
-
self.point = [-1, -1]
|
15 |
-
self.points = []
|
16 |
-
self.targets = []
|
17 |
-
self.is_point = True
|
18 |
-
self.last_click = False
|
19 |
-
self.is_drag = False
|
20 |
-
self.iteration = 0
|
21 |
-
self.mode = 'point'
|
22 |
-
self.r_mask = 50
|
23 |
-
self.show_mask = False
|
24 |
-
self.mask = torch.ones(256, 256)
|
25 |
-
self.lambda_mask = 20
|
26 |
-
self.feature_idx = 5
|
27 |
-
self.r1 = 3
|
28 |
-
self.r2 = 12
|
29 |
-
self.path = os.path.abspath(os.path.join(
|
30 |
-
os.path.dirname(__file__), '..', '_screenshots'))
|
31 |
-
self.defer_frames = 0
|
32 |
-
self.disabled_time = 0
|
33 |
-
|
34 |
-
def action(self, click, down, x, y):
|
35 |
-
if self.mode == 'point':
|
36 |
-
self.add_point(click, x, y)
|
37 |
-
elif down:
|
38 |
-
self.draw_mask(x, y)
|
39 |
-
|
40 |
-
def add_point(self, click, x, y):
|
41 |
-
if click:
|
42 |
-
self.point = [y, x]
|
43 |
-
elif self.last_click:
|
44 |
-
if self.is_drag:
|
45 |
-
self.stop_drag()
|
46 |
-
if self.is_point:
|
47 |
-
self.points.append(self.point)
|
48 |
-
self.is_point = False
|
49 |
-
else:
|
50 |
-
self.targets.append(self.point)
|
51 |
-
self.is_point = True
|
52 |
-
self.last_click = click
|
53 |
-
|
54 |
-
def init_mask(self, w, h):
|
55 |
-
self.width, self.height = w, h
|
56 |
-
self.mask = torch.ones(h, w)
|
57 |
-
|
58 |
-
def draw_mask(self, x, y):
|
59 |
-
X = torch.linspace(0, self.width, self.width)
|
60 |
-
Y = torch.linspace(0, self.height, self.height)
|
61 |
-
yy, xx = torch.meshgrid(Y, X)
|
62 |
-
circle = (xx - x)**2 + (yy - y)**2 < self.r_mask**2
|
63 |
-
if self.mode == 'flexible':
|
64 |
-
self.mask[circle] = 0
|
65 |
-
elif self.mode == 'fixed':
|
66 |
-
self.mask[circle] = 1
|
67 |
-
|
68 |
-
def stop_drag(self):
|
69 |
-
self.is_drag = False
|
70 |
-
self.iteration = 0
|
71 |
-
|
72 |
-
def set_points(self, points):
|
73 |
-
self.points = points
|
74 |
-
|
75 |
-
def reset_point(self):
|
76 |
-
self.points = []
|
77 |
-
self.targets = []
|
78 |
-
self.is_point = True
|
79 |
-
|
80 |
-
def load_points(self, suffix):
|
81 |
-
points = []
|
82 |
-
point_path = self.path + f'_{suffix}.txt'
|
83 |
-
try:
|
84 |
-
with open(point_path, "r") as f:
|
85 |
-
for line in f.readlines():
|
86 |
-
y, x = line.split()
|
87 |
-
points.append([int(y), int(x)])
|
88 |
-
except:
|
89 |
-
print(f'Wrong point file path: {point_path}')
|
90 |
-
return points
|
91 |
-
|
92 |
-
@imgui_utils.scoped_by_object_id
|
93 |
-
def __call__(self, show=True):
|
94 |
-
viz = self.viz
|
95 |
-
reset = False
|
96 |
-
if show:
|
97 |
-
with imgui_utils.grayed_out(self.disabled_time != 0):
|
98 |
-
imgui.text('Drag')
|
99 |
-
imgui.same_line(viz.label_w)
|
100 |
-
|
101 |
-
if imgui_utils.button('Add point', width=viz.button_w, enabled='image' in viz.result):
|
102 |
-
self.mode = 'point'
|
103 |
-
|
104 |
-
imgui.same_line()
|
105 |
-
reset = False
|
106 |
-
if imgui_utils.button('Reset point', width=viz.button_w, enabled='image' in viz.result):
|
107 |
-
self.reset_point()
|
108 |
-
reset = True
|
109 |
-
|
110 |
-
imgui.text(' ')
|
111 |
-
imgui.same_line(viz.label_w)
|
112 |
-
if imgui_utils.button('Start', width=viz.button_w, enabled='image' in viz.result):
|
113 |
-
self.is_drag = True
|
114 |
-
if len(self.points) > len(self.targets):
|
115 |
-
self.points = self.points[:len(self.targets)]
|
116 |
-
|
117 |
-
imgui.same_line()
|
118 |
-
if imgui_utils.button('Stop', width=viz.button_w, enabled='image' in viz.result):
|
119 |
-
self.stop_drag()
|
120 |
-
|
121 |
-
imgui.text(' ')
|
122 |
-
imgui.same_line(viz.label_w)
|
123 |
-
imgui.text(f'Steps: {self.iteration}')
|
124 |
-
|
125 |
-
imgui.text('Mask')
|
126 |
-
imgui.same_line(viz.label_w)
|
127 |
-
if imgui_utils.button('Flexible area', width=viz.button_w, enabled='image' in viz.result):
|
128 |
-
self.mode = 'flexible'
|
129 |
-
self.show_mask = True
|
130 |
-
|
131 |
-
imgui.same_line()
|
132 |
-
if imgui_utils.button('Fixed area', width=viz.button_w, enabled='image' in viz.result):
|
133 |
-
self.mode = 'fixed'
|
134 |
-
self.show_mask = True
|
135 |
-
|
136 |
-
imgui.text(' ')
|
137 |
-
imgui.same_line(viz.label_w)
|
138 |
-
if imgui_utils.button('Reset mask', width=viz.button_w, enabled='image' in viz.result):
|
139 |
-
self.mask = torch.ones(self.height, self.width)
|
140 |
-
imgui.same_line()
|
141 |
-
_clicked, self.show_mask = imgui.checkbox(
|
142 |
-
'Show mask', self.show_mask)
|
143 |
-
|
144 |
-
imgui.text(' ')
|
145 |
-
imgui.same_line(viz.label_w)
|
146 |
-
with imgui_utils.item_width(viz.font_size * 6):
|
147 |
-
changed, self.r_mask = imgui.input_int(
|
148 |
-
'Radius', self.r_mask)
|
149 |
-
|
150 |
-
imgui.text(' ')
|
151 |
-
imgui.same_line(viz.label_w)
|
152 |
-
with imgui_utils.item_width(viz.font_size * 6):
|
153 |
-
changed, self.lambda_mask = imgui.input_int(
|
154 |
-
'Lambda', self.lambda_mask)
|
155 |
-
|
156 |
-
self.disabled_time = max(self.disabled_time - viz.frame_delta, 0)
|
157 |
-
if self.defer_frames > 0:
|
158 |
-
self.defer_frames -= 1
|
159 |
-
viz.args.is_drag = self.is_drag
|
160 |
-
if self.is_drag:
|
161 |
-
self.iteration += 1
|
162 |
-
viz.args.iteration = self.iteration
|
163 |
-
viz.args.points = [point for point in self.points]
|
164 |
-
viz.args.targets = [point for point in self.targets]
|
165 |
-
viz.args.mask = self.mask
|
166 |
-
viz.args.lambda_mask = self.lambda_mask
|
167 |
-
viz.args.feature_idx = self.feature_idx
|
168 |
-
viz.args.r1 = self.r1
|
169 |
-
viz.args.r2 = self.r2
|
170 |
-
viz.args.reset = reset
|
171 |
-
|
172 |
-
|
173 |
-
# ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/portfolio/index.html
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
<!DOCTYPE html>
|
2 |
-
<html>
|
3 |
-
<head>
|
4 |
-
<title>Welcome to 1littlecoder</title>
|
5 |
-
<link href="https://fonts.googleapis.com/css2?family=Bellota&display=swap" rel="stylesheet">
|
6 |
-
<link href="style.css" rel="stylesheet" type="text/css">
|
7 |
-
</head>
|
8 |
-
<body>
|
9 |
-
<div id="header" class="section">
|
10 |
-
<img alt="logo" class="img-circle" src="https://w7.pngwing.com/pngs/670/845/png-transparent-batman-lego-action-figure-illustration-lego-batman-3-beyond-gotham-lego-batman-the-videogame-lego-dimensions-lego-batman-2-dc-super-heroes-games-heroes-fictional-character-film.png">
|
11 |
-
<p>Welcome to 1littlecoder</p>
|
12 |
-
</div>
|
13 |
-
<div class="section">
|
14 |
-
<h1><span>About Me</span></h1>
|
15 |
-
<p> Hey! I'm <strong>1littlecoder</strong> from <strong>India.</strong>. I Like <strong>Coding</strong> R Python Data Science Machine Learning</p>
|
16 |
-
<p class="quote">~ 1littlecoder</p>
|
17 |
-
</div>
|
18 |
-
<div class="section" id="res">
|
19 |
-
<h1><span>My Works</span></h1>
|
20 |
-
<p align="centre"><strong>Here Are Some Of My Works</strong></p>
|
21 |
-
<a href="https://telegram.me">
|
22 |
-
<img src="https://img.icons8.com/nolan/144/telegram-app.png"/>
|
23 |
-
<div class="caption">Telegram Channel</div>
|
24 |
-
</a>
|
25 |
-
<a href="https://github.com/amrrs">
|
26 |
-
<img src="https://img.icons8.com/nolan/144/github.png"/>
|
27 |
-
<div class="caption">Github Account</div>
|
28 |
-
</a>
|
29 |
-
<a href="https://1littlecoder.in">
|
30 |
-
<img src="https://img.icons8.com/dusk/144/000000/domain.png"/>
|
31 |
-
<div class="caption">My Website</div>
|
32 |
-
</a>
|
33 |
-
<br>
|
34 |
-
<p align="centre"><strong>Resources I Use</strong></p>
|
35 |
-
<a href="https://github.com/">
|
36 |
-
<img src="https://img.icons8.com/nolan/144/github.png"/>
|
37 |
-
<div class="caption">Github</div>
|
38 |
-
</a>
|
39 |
-
<a href="https://telegram.me">
|
40 |
-
<img src="https://img.icons8.com/nolan/144/telegram-app.png"/>
|
41 |
-
<div class="caption">Telegram</div>
|
42 |
-
</a>
|
43 |
-
<a href="https://code.visualstudio.com">
|
44 |
-
<img src="https://img.icons8.com/nolan/144/code.png"/>
|
45 |
-
<div class="caption">VS Code Editor</div>
|
46 |
-
</a>
|
47 |
-
<a href="https://python.org">
|
48 |
-
<img src="https://img.icons8.com/nolan/144/python.png"/>
|
49 |
-
<div class="caption">Python</div>
|
50 |
-
</a>
|
51 |
-
<a href="https://www.php.net/">
|
52 |
-
<img src="https://img.icons8.com/dusk/144/000000/php-logo.png"/>
|
53 |
-
<div class="caption">PHP</div>
|
54 |
-
</a>
|
55 |
-
<a href="https://ubuntu.com">
|
56 |
-
<img src="https://img.icons8.com/color/144/000000/ubuntu--v1.png"/>
|
57 |
-
<div class="caption">Ubuntu</div>
|
58 |
-
</a>
|
59 |
-
</div>
|
60 |
-
<div class="section">
|
61 |
-
<h1><span>My Skills</span></h1>
|
62 |
-
<ul>
|
63 |
-
<li>Python<br /> <progress min="0" max="100" value="95"></progress> </li>
|
64 |
-
<li>PHP <br /> <progress min="0" max="100" value="75"></progress> </li>
|
65 |
-
<li>Coding<br /> <progress min="0" max="100" value="100"></progress> </li>
|
66 |
-
</ul>
|
67 |
-
</div>
|
68 |
-
<div class="section" id="contacts">
|
69 |
-
<h1><span>Follow Me</span></h1>
|
70 |
-
<div>
|
71 |
-
<a href="https://instagram.com/" target="_blank">
|
72 |
-
<img alt="Instagram" src="https://img.icons8.com/cute-clipart/100/instagram-new.png"/>
|
73 |
-
</a>
|
74 |
-
<a href="https://twitter.com/1littlecoder">
|
75 |
-
<img alt="Twitter" src="https://www.sololearn.com/Uploads/icons/twitter.png" />
|
76 |
-
</a>
|
77 |
-
<a href="https://github.com/amrrs">
|
78 |
-
<img alt="GitHub" src="https://img.icons8.com/nolan/144/github.png"/>
|
79 |
-
</a>
|
80 |
-
<a href="https://t.me/">
|
81 |
-
<img alt="Telegram" src="https://img.icons8.com/fluent/96/000000/telegram-app.png"/>
|
82 |
-
</a>
|
83 |
-
<a href="https://www.youtube.com/channel/UCRD6WpNNzJpRIU4z89PNSbg">
|
84 |
-
<img alt="YouTube" src="https://img.icons8.com/color/96/000000/youtube-play.png"/>
|
85 |
-
</a>
|
86 |
-
<a href="mailto:[email protected]">
|
87 |
-
<img alt="Email" src="https://img.icons8.com/fluent/96/000000/gmail.png"/>
|
88 |
-
</a>
|
89 |
-
</div>
|
90 |
-
</div>
|
91 |
-
<div class="section" id="contacts">
|
92 |
-
<h1><span>Contact Us</span></h1>
|
93 |
-
<a href="mailto:[email protected]">
|
94 |
-
<img src="https://img.icons8.com/fluent/95/000000/gmail--v2.png"/>
|
95 |
-
</a>
|
96 |
-
</div>
|
97 |
-
<center>Made with ❤️ By <a href="https://github.com/amrrs">
|
98 |
-
1littlecoder
|
99 |
-
</a></center>
|
100 |
-
|
101 |
-
<script type="text/javascript">
|
102 |
-
function search() {
|
103 |
-
window.open('https://www.google.com/search?output=search&q=' + document.getElementById("question").value)
|
104 |
-
}
|
105 |
-
</script>
|
106 |
-
</body>
|
107 |
-
</html>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/deis.md
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# DEIS
|
14 |
-
|
15 |
-
Fast Sampling of Diffusion Models with Exponential Integrator.
|
16 |
-
|
17 |
-
## Overview
|
18 |
-
|
19 |
-
Original paper can be found [here](https://arxiv.org/abs/2204.13902). The original implementation can be found [here](https://github.com/qsh-zh/deis).
|
20 |
-
|
21 |
-
## DEISMultistepScheduler
|
22 |
-
[[autodoc]] DEISMultistepScheduler
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/euler.md
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# Euler scheduler
|
14 |
-
|
15 |
-
## Overview
|
16 |
-
|
17 |
-
Euler scheduler (Algorithm 2) from the paper [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364) by Karras et al. (2022). Based on the original [k-diffusion](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L51) implementation by Katherine Crowson.
|
18 |
-
Fast scheduler which often times generates good outputs with 20-30 steps.
|
19 |
-
|
20 |
-
## EulerDiscreteScheduler
|
21 |
-
[[autodoc]] EulerDiscreteScheduler
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/attention_flax.py
DELETED
@@ -1,446 +0,0 @@
|
|
1 |
-
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import functools
|
16 |
-
import math
|
17 |
-
|
18 |
-
import flax.linen as nn
|
19 |
-
import jax
|
20 |
-
import jax.numpy as jnp
|
21 |
-
|
22 |
-
|
23 |
-
def _query_chunk_attention(query, key, value, precision, key_chunk_size: int = 4096):
|
24 |
-
"""Multi-head dot product attention with a limited number of queries."""
|
25 |
-
num_kv, num_heads, k_features = key.shape[-3:]
|
26 |
-
v_features = value.shape[-1]
|
27 |
-
key_chunk_size = min(key_chunk_size, num_kv)
|
28 |
-
query = query / jnp.sqrt(k_features)
|
29 |
-
|
30 |
-
@functools.partial(jax.checkpoint, prevent_cse=False)
|
31 |
-
def summarize_chunk(query, key, value):
|
32 |
-
attn_weights = jnp.einsum("...qhd,...khd->...qhk", query, key, precision=precision)
|
33 |
-
|
34 |
-
max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
|
35 |
-
max_score = jax.lax.stop_gradient(max_score)
|
36 |
-
exp_weights = jnp.exp(attn_weights - max_score)
|
37 |
-
|
38 |
-
exp_values = jnp.einsum("...vhf,...qhv->...qhf", value, exp_weights, precision=precision)
|
39 |
-
max_score = jnp.einsum("...qhk->...qh", max_score)
|
40 |
-
|
41 |
-
return (exp_values, exp_weights.sum(axis=-1), max_score)
|
42 |
-
|
43 |
-
def chunk_scanner(chunk_idx):
|
44 |
-
# julienne key array
|
45 |
-
key_chunk = jax.lax.dynamic_slice(
|
46 |
-
operand=key,
|
47 |
-
start_indices=[0] * (key.ndim - 3) + [chunk_idx, 0, 0], # [...,k,h,d]
|
48 |
-
slice_sizes=list(key.shape[:-3]) + [key_chunk_size, num_heads, k_features], # [...,k,h,d]
|
49 |
-
)
|
50 |
-
|
51 |
-
# julienne value array
|
52 |
-
value_chunk = jax.lax.dynamic_slice(
|
53 |
-
operand=value,
|
54 |
-
start_indices=[0] * (value.ndim - 3) + [chunk_idx, 0, 0], # [...,v,h,d]
|
55 |
-
slice_sizes=list(value.shape[:-3]) + [key_chunk_size, num_heads, v_features], # [...,v,h,d]
|
56 |
-
)
|
57 |
-
|
58 |
-
return summarize_chunk(query, key_chunk, value_chunk)
|
59 |
-
|
60 |
-
chunk_values, chunk_weights, chunk_max = jax.lax.map(f=chunk_scanner, xs=jnp.arange(0, num_kv, key_chunk_size))
|
61 |
-
|
62 |
-
global_max = jnp.max(chunk_max, axis=0, keepdims=True)
|
63 |
-
max_diffs = jnp.exp(chunk_max - global_max)
|
64 |
-
|
65 |
-
chunk_values *= jnp.expand_dims(max_diffs, axis=-1)
|
66 |
-
chunk_weights *= max_diffs
|
67 |
-
|
68 |
-
all_values = chunk_values.sum(axis=0)
|
69 |
-
all_weights = jnp.expand_dims(chunk_weights, -1).sum(axis=0)
|
70 |
-
|
71 |
-
return all_values / all_weights
|
72 |
-
|
73 |
-
|
74 |
-
def jax_memory_efficient_attention(
|
75 |
-
query, key, value, precision=jax.lax.Precision.HIGHEST, query_chunk_size: int = 1024, key_chunk_size: int = 4096
|
76 |
-
):
|
77 |
-
r"""
|
78 |
-
Flax Memory-efficient multi-head dot product attention. https://arxiv.org/abs/2112.05682v2
|
79 |
-
https://github.com/AminRezaei0x443/memory-efficient-attention
|
80 |
-
|
81 |
-
Args:
|
82 |
-
query (`jnp.ndarray`): (batch..., query_length, head, query_key_depth_per_head)
|
83 |
-
key (`jnp.ndarray`): (batch..., key_value_length, head, query_key_depth_per_head)
|
84 |
-
value (`jnp.ndarray`): (batch..., key_value_length, head, value_depth_per_head)
|
85 |
-
precision (`jax.lax.Precision`, *optional*, defaults to `jax.lax.Precision.HIGHEST`):
|
86 |
-
numerical precision for computation
|
87 |
-
query_chunk_size (`int`, *optional*, defaults to 1024):
|
88 |
-
chunk size to divide query array value must divide query_length equally without remainder
|
89 |
-
key_chunk_size (`int`, *optional*, defaults to 4096):
|
90 |
-
chunk size to divide key and value array value must divide key_value_length equally without remainder
|
91 |
-
|
92 |
-
Returns:
|
93 |
-
(`jnp.ndarray`) with shape of (batch..., query_length, head, value_depth_per_head)
|
94 |
-
"""
|
95 |
-
num_q, num_heads, q_features = query.shape[-3:]
|
96 |
-
|
97 |
-
def chunk_scanner(chunk_idx, _):
|
98 |
-
# julienne query array
|
99 |
-
query_chunk = jax.lax.dynamic_slice(
|
100 |
-
operand=query,
|
101 |
-
start_indices=([0] * (query.ndim - 3)) + [chunk_idx, 0, 0], # [...,q,h,d]
|
102 |
-
slice_sizes=list(query.shape[:-3]) + [min(query_chunk_size, num_q), num_heads, q_features], # [...,q,h,d]
|
103 |
-
)
|
104 |
-
|
105 |
-
return (
|
106 |
-
chunk_idx + query_chunk_size, # unused ignore it
|
107 |
-
_query_chunk_attention(
|
108 |
-
query=query_chunk, key=key, value=value, precision=precision, key_chunk_size=key_chunk_size
|
109 |
-
),
|
110 |
-
)
|
111 |
-
|
112 |
-
_, res = jax.lax.scan(
|
113 |
-
f=chunk_scanner, init=0, xs=None, length=math.ceil(num_q / query_chunk_size) # start counter # stop counter
|
114 |
-
)
|
115 |
-
|
116 |
-
return jnp.concatenate(res, axis=-3) # fuse the chunked result back
|
117 |
-
|
118 |
-
|
119 |
-
class FlaxAttention(nn.Module):
|
120 |
-
r"""
|
121 |
-
A Flax multi-head attention module as described in: https://arxiv.org/abs/1706.03762
|
122 |
-
|
123 |
-
Parameters:
|
124 |
-
query_dim (:obj:`int`):
|
125 |
-
Input hidden states dimension
|
126 |
-
heads (:obj:`int`, *optional*, defaults to 8):
|
127 |
-
Number of heads
|
128 |
-
dim_head (:obj:`int`, *optional*, defaults to 64):
|
129 |
-
Hidden states dimension inside each head
|
130 |
-
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
131 |
-
Dropout rate
|
132 |
-
use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
|
133 |
-
enable memory efficient attention https://arxiv.org/abs/2112.05682
|
134 |
-
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
135 |
-
Parameters `dtype`
|
136 |
-
|
137 |
-
"""
|
138 |
-
query_dim: int
|
139 |
-
heads: int = 8
|
140 |
-
dim_head: int = 64
|
141 |
-
dropout: float = 0.0
|
142 |
-
use_memory_efficient_attention: bool = False
|
143 |
-
dtype: jnp.dtype = jnp.float32
|
144 |
-
|
145 |
-
def setup(self):
|
146 |
-
inner_dim = self.dim_head * self.heads
|
147 |
-
self.scale = self.dim_head**-0.5
|
148 |
-
|
149 |
-
# Weights were exported with old names {to_q, to_k, to_v, to_out}
|
150 |
-
self.query = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_q")
|
151 |
-
self.key = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_k")
|
152 |
-
self.value = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_v")
|
153 |
-
|
154 |
-
self.proj_attn = nn.Dense(self.query_dim, dtype=self.dtype, name="to_out_0")
|
155 |
-
self.dropout_layer = nn.Dropout(rate=self.dropout)
|
156 |
-
|
157 |
-
def reshape_heads_to_batch_dim(self, tensor):
|
158 |
-
batch_size, seq_len, dim = tensor.shape
|
159 |
-
head_size = self.heads
|
160 |
-
tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
|
161 |
-
tensor = jnp.transpose(tensor, (0, 2, 1, 3))
|
162 |
-
tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)
|
163 |
-
return tensor
|
164 |
-
|
165 |
-
def reshape_batch_dim_to_heads(self, tensor):
|
166 |
-
batch_size, seq_len, dim = tensor.shape
|
167 |
-
head_size = self.heads
|
168 |
-
tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
|
169 |
-
tensor = jnp.transpose(tensor, (0, 2, 1, 3))
|
170 |
-
tensor = tensor.reshape(batch_size // head_size, seq_len, dim * head_size)
|
171 |
-
return tensor
|
172 |
-
|
173 |
-
def __call__(self, hidden_states, context=None, deterministic=True):
|
174 |
-
context = hidden_states if context is None else context
|
175 |
-
|
176 |
-
query_proj = self.query(hidden_states)
|
177 |
-
key_proj = self.key(context)
|
178 |
-
value_proj = self.value(context)
|
179 |
-
|
180 |
-
query_states = self.reshape_heads_to_batch_dim(query_proj)
|
181 |
-
key_states = self.reshape_heads_to_batch_dim(key_proj)
|
182 |
-
value_states = self.reshape_heads_to_batch_dim(value_proj)
|
183 |
-
|
184 |
-
if self.use_memory_efficient_attention:
|
185 |
-
query_states = query_states.transpose(1, 0, 2)
|
186 |
-
key_states = key_states.transpose(1, 0, 2)
|
187 |
-
value_states = value_states.transpose(1, 0, 2)
|
188 |
-
|
189 |
-
# this if statement create a chunk size for each layer of the unet
|
190 |
-
# the chunk size is equal to the query_length dimension of the deepest layer of the unet
|
191 |
-
|
192 |
-
flatten_latent_dim = query_states.shape[-3]
|
193 |
-
if flatten_latent_dim % 64 == 0:
|
194 |
-
query_chunk_size = int(flatten_latent_dim / 64)
|
195 |
-
elif flatten_latent_dim % 16 == 0:
|
196 |
-
query_chunk_size = int(flatten_latent_dim / 16)
|
197 |
-
elif flatten_latent_dim % 4 == 0:
|
198 |
-
query_chunk_size = int(flatten_latent_dim / 4)
|
199 |
-
else:
|
200 |
-
query_chunk_size = int(flatten_latent_dim)
|
201 |
-
|
202 |
-
hidden_states = jax_memory_efficient_attention(
|
203 |
-
query_states, key_states, value_states, query_chunk_size=query_chunk_size, key_chunk_size=4096 * 4
|
204 |
-
)
|
205 |
-
|
206 |
-
hidden_states = hidden_states.transpose(1, 0, 2)
|
207 |
-
else:
|
208 |
-
# compute attentions
|
209 |
-
attention_scores = jnp.einsum("b i d, b j d->b i j", query_states, key_states)
|
210 |
-
attention_scores = attention_scores * self.scale
|
211 |
-
attention_probs = nn.softmax(attention_scores, axis=2)
|
212 |
-
|
213 |
-
# attend to values
|
214 |
-
hidden_states = jnp.einsum("b i j, b j d -> b i d", attention_probs, value_states)
|
215 |
-
|
216 |
-
hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
|
217 |
-
hidden_states = self.proj_attn(hidden_states)
|
218 |
-
return self.dropout_layer(hidden_states, deterministic=deterministic)
|
219 |
-
|
220 |
-
|
221 |
-
class FlaxBasicTransformerBlock(nn.Module):
|
222 |
-
r"""
|
223 |
-
A Flax transformer block layer with `GLU` (Gated Linear Unit) activation function as described in:
|
224 |
-
https://arxiv.org/abs/1706.03762
|
225 |
-
|
226 |
-
|
227 |
-
Parameters:
|
228 |
-
dim (:obj:`int`):
|
229 |
-
Inner hidden states dimension
|
230 |
-
n_heads (:obj:`int`):
|
231 |
-
Number of heads
|
232 |
-
d_head (:obj:`int`):
|
233 |
-
Hidden states dimension inside each head
|
234 |
-
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
235 |
-
Dropout rate
|
236 |
-
only_cross_attention (`bool`, defaults to `False`):
|
237 |
-
Whether to only apply cross attention.
|
238 |
-
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
239 |
-
Parameters `dtype`
|
240 |
-
use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
|
241 |
-
enable memory efficient attention https://arxiv.org/abs/2112.05682
|
242 |
-
"""
|
243 |
-
dim: int
|
244 |
-
n_heads: int
|
245 |
-
d_head: int
|
246 |
-
dropout: float = 0.0
|
247 |
-
only_cross_attention: bool = False
|
248 |
-
dtype: jnp.dtype = jnp.float32
|
249 |
-
use_memory_efficient_attention: bool = False
|
250 |
-
|
251 |
-
def setup(self):
|
252 |
-
# self attention (or cross_attention if only_cross_attention is True)
|
253 |
-
self.attn1 = FlaxAttention(
|
254 |
-
self.dim, self.n_heads, self.d_head, self.dropout, self.use_memory_efficient_attention, dtype=self.dtype
|
255 |
-
)
|
256 |
-
# cross attention
|
257 |
-
self.attn2 = FlaxAttention(
|
258 |
-
self.dim, self.n_heads, self.d_head, self.dropout, self.use_memory_efficient_attention, dtype=self.dtype
|
259 |
-
)
|
260 |
-
self.ff = FlaxFeedForward(dim=self.dim, dropout=self.dropout, dtype=self.dtype)
|
261 |
-
self.norm1 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
|
262 |
-
self.norm2 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
|
263 |
-
self.norm3 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
|
264 |
-
self.dropout_layer = nn.Dropout(rate=self.dropout)
|
265 |
-
|
266 |
-
def __call__(self, hidden_states, context, deterministic=True):
|
267 |
-
# self attention
|
268 |
-
residual = hidden_states
|
269 |
-
if self.only_cross_attention:
|
270 |
-
hidden_states = self.attn1(self.norm1(hidden_states), context, deterministic=deterministic)
|
271 |
-
else:
|
272 |
-
hidden_states = self.attn1(self.norm1(hidden_states), deterministic=deterministic)
|
273 |
-
hidden_states = hidden_states + residual
|
274 |
-
|
275 |
-
# cross attention
|
276 |
-
residual = hidden_states
|
277 |
-
hidden_states = self.attn2(self.norm2(hidden_states), context, deterministic=deterministic)
|
278 |
-
hidden_states = hidden_states + residual
|
279 |
-
|
280 |
-
# feed forward
|
281 |
-
residual = hidden_states
|
282 |
-
hidden_states = self.ff(self.norm3(hidden_states), deterministic=deterministic)
|
283 |
-
hidden_states = hidden_states + residual
|
284 |
-
|
285 |
-
return self.dropout_layer(hidden_states, deterministic=deterministic)
|
286 |
-
|
287 |
-
|
288 |
-
class FlaxTransformer2DModel(nn.Module):
|
289 |
-
r"""
|
290 |
-
A Spatial Transformer layer with Gated Linear Unit (GLU) activation function as described in:
|
291 |
-
https://arxiv.org/pdf/1506.02025.pdf
|
292 |
-
|
293 |
-
|
294 |
-
Parameters:
|
295 |
-
in_channels (:obj:`int`):
|
296 |
-
Input number of channels
|
297 |
-
n_heads (:obj:`int`):
|
298 |
-
Number of heads
|
299 |
-
d_head (:obj:`int`):
|
300 |
-
Hidden states dimension inside each head
|
301 |
-
depth (:obj:`int`, *optional*, defaults to 1):
|
302 |
-
Number of transformers block
|
303 |
-
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
304 |
-
Dropout rate
|
305 |
-
use_linear_projection (`bool`, defaults to `False`): tbd
|
306 |
-
only_cross_attention (`bool`, defaults to `False`): tbd
|
307 |
-
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
308 |
-
Parameters `dtype`
|
309 |
-
use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
|
310 |
-
enable memory efficient attention https://arxiv.org/abs/2112.05682
|
311 |
-
"""
|
312 |
-
in_channels: int
|
313 |
-
n_heads: int
|
314 |
-
d_head: int
|
315 |
-
depth: int = 1
|
316 |
-
dropout: float = 0.0
|
317 |
-
use_linear_projection: bool = False
|
318 |
-
only_cross_attention: bool = False
|
319 |
-
dtype: jnp.dtype = jnp.float32
|
320 |
-
use_memory_efficient_attention: bool = False
|
321 |
-
|
322 |
-
def setup(self):
|
323 |
-
self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-5)
|
324 |
-
|
325 |
-
inner_dim = self.n_heads * self.d_head
|
326 |
-
if self.use_linear_projection:
|
327 |
-
self.proj_in = nn.Dense(inner_dim, dtype=self.dtype)
|
328 |
-
else:
|
329 |
-
self.proj_in = nn.Conv(
|
330 |
-
inner_dim,
|
331 |
-
kernel_size=(1, 1),
|
332 |
-
strides=(1, 1),
|
333 |
-
padding="VALID",
|
334 |
-
dtype=self.dtype,
|
335 |
-
)
|
336 |
-
|
337 |
-
self.transformer_blocks = [
|
338 |
-
FlaxBasicTransformerBlock(
|
339 |
-
inner_dim,
|
340 |
-
self.n_heads,
|
341 |
-
self.d_head,
|
342 |
-
dropout=self.dropout,
|
343 |
-
only_cross_attention=self.only_cross_attention,
|
344 |
-
dtype=self.dtype,
|
345 |
-
use_memory_efficient_attention=self.use_memory_efficient_attention,
|
346 |
-
)
|
347 |
-
for _ in range(self.depth)
|
348 |
-
]
|
349 |
-
|
350 |
-
if self.use_linear_projection:
|
351 |
-
self.proj_out = nn.Dense(inner_dim, dtype=self.dtype)
|
352 |
-
else:
|
353 |
-
self.proj_out = nn.Conv(
|
354 |
-
inner_dim,
|
355 |
-
kernel_size=(1, 1),
|
356 |
-
strides=(1, 1),
|
357 |
-
padding="VALID",
|
358 |
-
dtype=self.dtype,
|
359 |
-
)
|
360 |
-
|
361 |
-
self.dropout_layer = nn.Dropout(rate=self.dropout)
|
362 |
-
|
363 |
-
def __call__(self, hidden_states, context, deterministic=True):
|
364 |
-
batch, height, width, channels = hidden_states.shape
|
365 |
-
residual = hidden_states
|
366 |
-
hidden_states = self.norm(hidden_states)
|
367 |
-
if self.use_linear_projection:
|
368 |
-
hidden_states = hidden_states.reshape(batch, height * width, channels)
|
369 |
-
hidden_states = self.proj_in(hidden_states)
|
370 |
-
else:
|
371 |
-
hidden_states = self.proj_in(hidden_states)
|
372 |
-
hidden_states = hidden_states.reshape(batch, height * width, channels)
|
373 |
-
|
374 |
-
for transformer_block in self.transformer_blocks:
|
375 |
-
hidden_states = transformer_block(hidden_states, context, deterministic=deterministic)
|
376 |
-
|
377 |
-
if self.use_linear_projection:
|
378 |
-
hidden_states = self.proj_out(hidden_states)
|
379 |
-
hidden_states = hidden_states.reshape(batch, height, width, channels)
|
380 |
-
else:
|
381 |
-
hidden_states = hidden_states.reshape(batch, height, width, channels)
|
382 |
-
hidden_states = self.proj_out(hidden_states)
|
383 |
-
|
384 |
-
hidden_states = hidden_states + residual
|
385 |
-
return self.dropout_layer(hidden_states, deterministic=deterministic)
|
386 |
-
|
387 |
-
|
388 |
-
class FlaxFeedForward(nn.Module):
|
389 |
-
r"""
|
390 |
-
Flax module that encapsulates two Linear layers separated by a non-linearity. It is the counterpart of PyTorch's
|
391 |
-
[`FeedForward`] class, with the following simplifications:
|
392 |
-
- The activation function is currently hardcoded to a gated linear unit from:
|
393 |
-
https://arxiv.org/abs/2002.05202
|
394 |
-
- `dim_out` is equal to `dim`.
|
395 |
-
- The number of hidden dimensions is hardcoded to `dim * 4` in [`FlaxGELU`].
|
396 |
-
|
397 |
-
Parameters:
|
398 |
-
dim (:obj:`int`):
|
399 |
-
Inner hidden states dimension
|
400 |
-
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
401 |
-
Dropout rate
|
402 |
-
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
403 |
-
Parameters `dtype`
|
404 |
-
"""
|
405 |
-
dim: int
|
406 |
-
dropout: float = 0.0
|
407 |
-
dtype: jnp.dtype = jnp.float32
|
408 |
-
|
409 |
-
def setup(self):
|
410 |
-
# The second linear layer needs to be called
|
411 |
-
# net_2 for now to match the index of the Sequential layer
|
412 |
-
self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype)
|
413 |
-
self.net_2 = nn.Dense(self.dim, dtype=self.dtype)
|
414 |
-
|
415 |
-
def __call__(self, hidden_states, deterministic=True):
|
416 |
-
hidden_states = self.net_0(hidden_states, deterministic=deterministic)
|
417 |
-
hidden_states = self.net_2(hidden_states)
|
418 |
-
return hidden_states
|
419 |
-
|
420 |
-
|
421 |
-
class FlaxGEGLU(nn.Module):
|
422 |
-
r"""
|
423 |
-
Flax implementation of a Linear layer followed by the variant of the gated linear unit activation function from
|
424 |
-
https://arxiv.org/abs/2002.05202.
|
425 |
-
|
426 |
-
Parameters:
|
427 |
-
dim (:obj:`int`):
|
428 |
-
Input hidden states dimension
|
429 |
-
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
430 |
-
Dropout rate
|
431 |
-
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
432 |
-
Parameters `dtype`
|
433 |
-
"""
|
434 |
-
dim: int
|
435 |
-
dropout: float = 0.0
|
436 |
-
dtype: jnp.dtype = jnp.float32
|
437 |
-
|
438 |
-
def setup(self):
|
439 |
-
inner_dim = self.dim * 4
|
440 |
-
self.proj = nn.Dense(inner_dim * 2, dtype=self.dtype)
|
441 |
-
self.dropout_layer = nn.Dropout(rate=self.dropout)
|
442 |
-
|
443 |
-
def __call__(self, hidden_states, deterministic=True):
|
444 |
-
hidden_states = self.proj(hidden_states)
|
445 |
-
hidden_linear, hidden_gelu = jnp.split(hidden_states, 2, axis=2)
|
446 |
-
return self.dropout_layer(hidden_linear * nn.gelu(hidden_gelu), deterministic=deterministic)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/pipeline_utils.py
DELETED
@@ -1,1698 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
-
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
4 |
-
#
|
5 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
# you may not use this file except in compliance with the License.
|
7 |
-
# You may obtain a copy of the License at
|
8 |
-
#
|
9 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
#
|
11 |
-
# Unless required by applicable law or agreed to in writing, software
|
12 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
# See the License for the specific language governing permissions and
|
15 |
-
# limitations under the License.
|
16 |
-
|
17 |
-
import fnmatch
|
18 |
-
import importlib
|
19 |
-
import inspect
|
20 |
-
import os
|
21 |
-
import re
|
22 |
-
import sys
|
23 |
-
import warnings
|
24 |
-
from dataclasses import dataclass
|
25 |
-
from pathlib import Path
|
26 |
-
from typing import Any, Callable, Dict, List, Optional, Union
|
27 |
-
|
28 |
-
import numpy as np
|
29 |
-
import PIL
|
30 |
-
import torch
|
31 |
-
from huggingface_hub import ModelCard, hf_hub_download, model_info, snapshot_download
|
32 |
-
from packaging import version
|
33 |
-
from requests.exceptions import HTTPError
|
34 |
-
from tqdm.auto import tqdm
|
35 |
-
|
36 |
-
import diffusers
|
37 |
-
|
38 |
-
from .. import __version__
|
39 |
-
from ..configuration_utils import ConfigMixin
|
40 |
-
from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT
|
41 |
-
from ..schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
|
42 |
-
from ..utils import (
|
43 |
-
CONFIG_NAME,
|
44 |
-
DEPRECATED_REVISION_ARGS,
|
45 |
-
DIFFUSERS_CACHE,
|
46 |
-
HF_HUB_OFFLINE,
|
47 |
-
SAFETENSORS_WEIGHTS_NAME,
|
48 |
-
WEIGHTS_NAME,
|
49 |
-
BaseOutput,
|
50 |
-
deprecate,
|
51 |
-
get_class_from_dynamic_module,
|
52 |
-
is_accelerate_available,
|
53 |
-
is_accelerate_version,
|
54 |
-
is_compiled_module,
|
55 |
-
is_safetensors_available,
|
56 |
-
is_torch_version,
|
57 |
-
is_transformers_available,
|
58 |
-
logging,
|
59 |
-
numpy_to_pil,
|
60 |
-
)
|
61 |
-
|
62 |
-
|
63 |
-
if is_transformers_available():
|
64 |
-
import transformers
|
65 |
-
from transformers import PreTrainedModel
|
66 |
-
from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME
|
67 |
-
from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME
|
68 |
-
from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME
|
69 |
-
|
70 |
-
from ..utils import FLAX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME
|
71 |
-
|
72 |
-
|
73 |
-
if is_accelerate_available():
|
74 |
-
import accelerate
|
75 |
-
|
76 |
-
|
77 |
-
INDEX_FILE = "diffusion_pytorch_model.bin"
|
78 |
-
CUSTOM_PIPELINE_FILE_NAME = "pipeline.py"
|
79 |
-
DUMMY_MODULES_FOLDER = "diffusers.utils"
|
80 |
-
TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils"
|
81 |
-
CONNECTED_PIPES_KEYS = ["prior"]
|
82 |
-
|
83 |
-
|
84 |
-
logger = logging.get_logger(__name__)
|
85 |
-
|
86 |
-
|
87 |
-
LOADABLE_CLASSES = {
|
88 |
-
"diffusers": {
|
89 |
-
"ModelMixin": ["save_pretrained", "from_pretrained"],
|
90 |
-
"SchedulerMixin": ["save_pretrained", "from_pretrained"],
|
91 |
-
"DiffusionPipeline": ["save_pretrained", "from_pretrained"],
|
92 |
-
"OnnxRuntimeModel": ["save_pretrained", "from_pretrained"],
|
93 |
-
},
|
94 |
-
"transformers": {
|
95 |
-
"PreTrainedTokenizer": ["save_pretrained", "from_pretrained"],
|
96 |
-
"PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"],
|
97 |
-
"PreTrainedModel": ["save_pretrained", "from_pretrained"],
|
98 |
-
"FeatureExtractionMixin": ["save_pretrained", "from_pretrained"],
|
99 |
-
"ProcessorMixin": ["save_pretrained", "from_pretrained"],
|
100 |
-
"ImageProcessingMixin": ["save_pretrained", "from_pretrained"],
|
101 |
-
},
|
102 |
-
"onnxruntime.training": {
|
103 |
-
"ORTModule": ["save_pretrained", "from_pretrained"],
|
104 |
-
},
|
105 |
-
}
|
106 |
-
|
107 |
-
ALL_IMPORTABLE_CLASSES = {}
|
108 |
-
for library in LOADABLE_CLASSES:
|
109 |
-
ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library])
|
110 |
-
|
111 |
-
|
112 |
-
@dataclass
|
113 |
-
class ImagePipelineOutput(BaseOutput):
|
114 |
-
"""
|
115 |
-
Output class for image pipelines.
|
116 |
-
|
117 |
-
Args:
|
118 |
-
images (`List[PIL.Image.Image]` or `np.ndarray`)
|
119 |
-
List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
|
120 |
-
num_channels)`.
|
121 |
-
"""
|
122 |
-
|
123 |
-
images: Union[List[PIL.Image.Image], np.ndarray]
|
124 |
-
|
125 |
-
|
126 |
-
@dataclass
|
127 |
-
class AudioPipelineOutput(BaseOutput):
|
128 |
-
"""
|
129 |
-
Output class for audio pipelines.
|
130 |
-
|
131 |
-
Args:
|
132 |
-
audios (`np.ndarray`)
|
133 |
-
List of denoised audio samples of a NumPy array of shape `(batch_size, num_channels, sample_rate)`.
|
134 |
-
"""
|
135 |
-
|
136 |
-
audios: np.ndarray
|
137 |
-
|
138 |
-
|
139 |
-
def is_safetensors_compatible(filenames, variant=None, passed_components=None) -> bool:
|
140 |
-
"""
|
141 |
-
Checking for safetensors compatibility:
|
142 |
-
- By default, all models are saved with the default pytorch serialization, so we use the list of default pytorch
|
143 |
-
files to know which safetensors files are needed.
|
144 |
-
- The model is safetensors compatible only if there is a matching safetensors file for every default pytorch file.
|
145 |
-
|
146 |
-
Converting default pytorch serialized filenames to safetensors serialized filenames:
|
147 |
-
- For models from the diffusers library, just replace the ".bin" extension with ".safetensors"
|
148 |
-
- For models from the transformers library, the filename changes from "pytorch_model" to "model", and the ".bin"
|
149 |
-
extension is replaced with ".safetensors"
|
150 |
-
"""
|
151 |
-
pt_filenames = []
|
152 |
-
|
153 |
-
sf_filenames = set()
|
154 |
-
|
155 |
-
passed_components = passed_components or []
|
156 |
-
|
157 |
-
for filename in filenames:
|
158 |
-
_, extension = os.path.splitext(filename)
|
159 |
-
|
160 |
-
if len(filename.split("/")) == 2 and filename.split("/")[0] in passed_components:
|
161 |
-
continue
|
162 |
-
|
163 |
-
if extension == ".bin":
|
164 |
-
pt_filenames.append(filename)
|
165 |
-
elif extension == ".safetensors":
|
166 |
-
sf_filenames.add(filename)
|
167 |
-
|
168 |
-
for filename in pt_filenames:
|
169 |
-
# filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extention = '.bam'
|
170 |
-
path, filename = os.path.split(filename)
|
171 |
-
filename, extension = os.path.splitext(filename)
|
172 |
-
|
173 |
-
if filename.startswith("pytorch_model"):
|
174 |
-
filename = filename.replace("pytorch_model", "model")
|
175 |
-
else:
|
176 |
-
filename = filename
|
177 |
-
|
178 |
-
expected_sf_filename = os.path.join(path, filename)
|
179 |
-
expected_sf_filename = f"{expected_sf_filename}.safetensors"
|
180 |
-
|
181 |
-
if expected_sf_filename not in sf_filenames:
|
182 |
-
logger.warning(f"{expected_sf_filename} not found")
|
183 |
-
return False
|
184 |
-
|
185 |
-
return True
|
186 |
-
|
187 |
-
|
188 |
-
def variant_compatible_siblings(filenames, variant=None) -> Union[List[os.PathLike], str]:
|
189 |
-
weight_names = [
|
190 |
-
WEIGHTS_NAME,
|
191 |
-
SAFETENSORS_WEIGHTS_NAME,
|
192 |
-
FLAX_WEIGHTS_NAME,
|
193 |
-
ONNX_WEIGHTS_NAME,
|
194 |
-
ONNX_EXTERNAL_WEIGHTS_NAME,
|
195 |
-
]
|
196 |
-
|
197 |
-
if is_transformers_available():
|
198 |
-
weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME]
|
199 |
-
|
200 |
-
# model_pytorch, diffusion_model_pytorch, ...
|
201 |
-
weight_prefixes = [w.split(".")[0] for w in weight_names]
|
202 |
-
# .bin, .safetensors, ...
|
203 |
-
weight_suffixs = [w.split(".")[-1] for w in weight_names]
|
204 |
-
# -00001-of-00002
|
205 |
-
transformers_index_format = r"\d{5}-of-\d{5}"
|
206 |
-
|
207 |
-
if variant is not None:
|
208 |
-
# `diffusion_pytorch_model.fp16.bin` as well as `model.fp16-00001-of-00002.safetensors`
|
209 |
-
variant_file_re = re.compile(
|
210 |
-
rf"({'|'.join(weight_prefixes)})\.({variant}|{variant}-{transformers_index_format})\.({'|'.join(weight_suffixs)})$"
|
211 |
-
)
|
212 |
-
# `text_encoder/pytorch_model.bin.index.fp16.json`
|
213 |
-
variant_index_re = re.compile(
|
214 |
-
rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.{variant}\.json$"
|
215 |
-
)
|
216 |
-
|
217 |
-
# `diffusion_pytorch_model.bin` as well as `model-00001-of-00002.safetensors`
|
218 |
-
non_variant_file_re = re.compile(
|
219 |
-
rf"({'|'.join(weight_prefixes)})(-{transformers_index_format})?\.({'|'.join(weight_suffixs)})$"
|
220 |
-
)
|
221 |
-
# `text_encoder/pytorch_model.bin.index.json`
|
222 |
-
non_variant_index_re = re.compile(rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.json")
|
223 |
-
|
224 |
-
if variant is not None:
|
225 |
-
variant_weights = {f for f in filenames if variant_file_re.match(f.split("/")[-1]) is not None}
|
226 |
-
variant_indexes = {f for f in filenames if variant_index_re.match(f.split("/")[-1]) is not None}
|
227 |
-
variant_filenames = variant_weights | variant_indexes
|
228 |
-
else:
|
229 |
-
variant_filenames = set()
|
230 |
-
|
231 |
-
non_variant_weights = {f for f in filenames if non_variant_file_re.match(f.split("/")[-1]) is not None}
|
232 |
-
non_variant_indexes = {f for f in filenames if non_variant_index_re.match(f.split("/")[-1]) is not None}
|
233 |
-
non_variant_filenames = non_variant_weights | non_variant_indexes
|
234 |
-
|
235 |
-
# all variant filenames will be used by default
|
236 |
-
usable_filenames = set(variant_filenames)
|
237 |
-
|
238 |
-
def convert_to_variant(filename):
|
239 |
-
if "index" in filename:
|
240 |
-
variant_filename = filename.replace("index", f"index.{variant}")
|
241 |
-
elif re.compile(f"^(.*?){transformers_index_format}").match(filename) is not None:
|
242 |
-
variant_filename = f"{filename.split('-')[0]}.{variant}-{'-'.join(filename.split('-')[1:])}"
|
243 |
-
else:
|
244 |
-
variant_filename = f"{filename.split('.')[0]}.{variant}.{filename.split('.')[1]}"
|
245 |
-
return variant_filename
|
246 |
-
|
247 |
-
for f in non_variant_filenames:
|
248 |
-
variant_filename = convert_to_variant(f)
|
249 |
-
if variant_filename not in usable_filenames:
|
250 |
-
usable_filenames.add(f)
|
251 |
-
|
252 |
-
return usable_filenames, variant_filenames
|
253 |
-
|
254 |
-
|
255 |
-
def warn_deprecated_model_variant(pretrained_model_name_or_path, use_auth_token, variant, revision, model_filenames):
|
256 |
-
info = model_info(
|
257 |
-
pretrained_model_name_or_path,
|
258 |
-
use_auth_token=use_auth_token,
|
259 |
-
revision=None,
|
260 |
-
)
|
261 |
-
filenames = {sibling.rfilename for sibling in info.siblings}
|
262 |
-
comp_model_filenames, _ = variant_compatible_siblings(filenames, variant=revision)
|
263 |
-
comp_model_filenames = [".".join(f.split(".")[:1] + f.split(".")[2:]) for f in comp_model_filenames]
|
264 |
-
|
265 |
-
if set(comp_model_filenames) == set(model_filenames):
|
266 |
-
warnings.warn(
|
267 |
-
f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` even though you can load it via `variant=`{revision}`. Loading model variants via `revision='{revision}'` is deprecated and will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
|
268 |
-
FutureWarning,
|
269 |
-
)
|
270 |
-
else:
|
271 |
-
warnings.warn(
|
272 |
-
f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have the required variant filenames in the 'main' branch. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {revision} files' so that the correct variant file can be added.",
|
273 |
-
FutureWarning,
|
274 |
-
)
|
275 |
-
|
276 |
-
|
277 |
-
def maybe_raise_or_warn(
|
278 |
-
library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module
|
279 |
-
):
|
280 |
-
"""Simple helper method to raise or warn in case incorrect module has been passed"""
|
281 |
-
if not is_pipeline_module:
|
282 |
-
library = importlib.import_module(library_name)
|
283 |
-
class_obj = getattr(library, class_name)
|
284 |
-
class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}
|
285 |
-
|
286 |
-
expected_class_obj = None
|
287 |
-
for class_name, class_candidate in class_candidates.items():
|
288 |
-
if class_candidate is not None and issubclass(class_obj, class_candidate):
|
289 |
-
expected_class_obj = class_candidate
|
290 |
-
|
291 |
-
# Dynamo wraps the original model in a private class.
|
292 |
-
# I didn't find a public API to get the original class.
|
293 |
-
sub_model = passed_class_obj[name]
|
294 |
-
model_cls = sub_model.__class__
|
295 |
-
if is_compiled_module(sub_model):
|
296 |
-
model_cls = sub_model._orig_mod.__class__
|
297 |
-
|
298 |
-
if not issubclass(model_cls, expected_class_obj):
|
299 |
-
raise ValueError(
|
300 |
-
f"{passed_class_obj[name]} is of type: {model_cls}, but should be" f" {expected_class_obj}"
|
301 |
-
)
|
302 |
-
else:
|
303 |
-
logger.warning(
|
304 |
-
f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it"
|
305 |
-
" has the correct type"
|
306 |
-
)
|
307 |
-
|
308 |
-
|
309 |
-
def get_class_obj_and_candidates(library_name, class_name, importable_classes, pipelines, is_pipeline_module):
|
310 |
-
"""Simple helper method to retrieve class object of module as well as potential parent class objects"""
|
311 |
-
if is_pipeline_module:
|
312 |
-
pipeline_module = getattr(pipelines, library_name)
|
313 |
-
|
314 |
-
class_obj = getattr(pipeline_module, class_name)
|
315 |
-
class_candidates = {c: class_obj for c in importable_classes.keys()}
|
316 |
-
else:
|
317 |
-
# else we just import it from the library.
|
318 |
-
library = importlib.import_module(library_name)
|
319 |
-
|
320 |
-
class_obj = getattr(library, class_name)
|
321 |
-
class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}
|
322 |
-
|
323 |
-
return class_obj, class_candidates
|
324 |
-
|
325 |
-
|
326 |
-
def _get_pipeline_class(
|
327 |
-
class_obj, config, load_connected_pipeline=False, custom_pipeline=None, cache_dir=None, revision=None
|
328 |
-
):
|
329 |
-
if custom_pipeline is not None:
|
330 |
-
if custom_pipeline.endswith(".py"):
|
331 |
-
path = Path(custom_pipeline)
|
332 |
-
# decompose into folder & file
|
333 |
-
file_name = path.name
|
334 |
-
custom_pipeline = path.parent.absolute()
|
335 |
-
else:
|
336 |
-
file_name = CUSTOM_PIPELINE_FILE_NAME
|
337 |
-
|
338 |
-
return get_class_from_dynamic_module(
|
339 |
-
custom_pipeline, module_file=file_name, cache_dir=cache_dir, revision=revision
|
340 |
-
)
|
341 |
-
|
342 |
-
if class_obj != DiffusionPipeline:
|
343 |
-
return class_obj
|
344 |
-
|
345 |
-
diffusers_module = importlib.import_module(class_obj.__module__.split(".")[0])
|
346 |
-
pipeline_cls = getattr(diffusers_module, config["_class_name"])
|
347 |
-
|
348 |
-
if load_connected_pipeline:
|
349 |
-
from .auto_pipeline import _get_connected_pipeline
|
350 |
-
|
351 |
-
connected_pipeline_cls = _get_connected_pipeline(pipeline_cls)
|
352 |
-
if connected_pipeline_cls is not None:
|
353 |
-
logger.info(
|
354 |
-
f"Loading connected pipeline {connected_pipeline_cls.__name__} instead of {pipeline_cls.__name__} as specified via `load_connected_pipeline=True`"
|
355 |
-
)
|
356 |
-
else:
|
357 |
-
logger.info(f"{pipeline_cls.__name__} has no connected pipeline class. Loading {pipeline_cls.__name__}.")
|
358 |
-
|
359 |
-
pipeline_cls = connected_pipeline_cls or pipeline_cls
|
360 |
-
|
361 |
-
return pipeline_cls
|
362 |
-
|
363 |
-
|
364 |
-
def load_sub_model(
|
365 |
-
library_name: str,
|
366 |
-
class_name: str,
|
367 |
-
importable_classes: List[Any],
|
368 |
-
pipelines: Any,
|
369 |
-
is_pipeline_module: bool,
|
370 |
-
pipeline_class: Any,
|
371 |
-
torch_dtype: torch.dtype,
|
372 |
-
provider: Any,
|
373 |
-
sess_options: Any,
|
374 |
-
device_map: Optional[Union[Dict[str, torch.device], str]],
|
375 |
-
max_memory: Optional[Dict[Union[int, str], Union[int, str]]],
|
376 |
-
offload_folder: Optional[Union[str, os.PathLike]],
|
377 |
-
offload_state_dict: bool,
|
378 |
-
model_variants: Dict[str, str],
|
379 |
-
name: str,
|
380 |
-
from_flax: bool,
|
381 |
-
variant: str,
|
382 |
-
low_cpu_mem_usage: bool,
|
383 |
-
cached_folder: Union[str, os.PathLike],
|
384 |
-
):
|
385 |
-
"""Helper method to load the module `name` from `library_name` and `class_name`"""
|
386 |
-
# retrieve class candidates
|
387 |
-
class_obj, class_candidates = get_class_obj_and_candidates(
|
388 |
-
library_name, class_name, importable_classes, pipelines, is_pipeline_module
|
389 |
-
)
|
390 |
-
|
391 |
-
load_method_name = None
|
392 |
-
# retrive load method name
|
393 |
-
for class_name, class_candidate in class_candidates.items():
|
394 |
-
if class_candidate is not None and issubclass(class_obj, class_candidate):
|
395 |
-
load_method_name = importable_classes[class_name][1]
|
396 |
-
|
397 |
-
# if load method name is None, then we have a dummy module -> raise Error
|
398 |
-
if load_method_name is None:
|
399 |
-
none_module = class_obj.__module__
|
400 |
-
is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith(
|
401 |
-
TRANSFORMERS_DUMMY_MODULES_FOLDER
|
402 |
-
)
|
403 |
-
if is_dummy_path and "dummy" in none_module:
|
404 |
-
# call class_obj for nice error message of missing requirements
|
405 |
-
class_obj()
|
406 |
-
|
407 |
-
raise ValueError(
|
408 |
-
f"The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have"
|
409 |
-
f" any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}."
|
410 |
-
)
|
411 |
-
|
412 |
-
load_method = getattr(class_obj, load_method_name)
|
413 |
-
|
414 |
-
# add kwargs to loading method
|
415 |
-
loading_kwargs = {}
|
416 |
-
if issubclass(class_obj, torch.nn.Module):
|
417 |
-
loading_kwargs["torch_dtype"] = torch_dtype
|
418 |
-
if issubclass(class_obj, diffusers.OnnxRuntimeModel):
|
419 |
-
loading_kwargs["provider"] = provider
|
420 |
-
loading_kwargs["sess_options"] = sess_options
|
421 |
-
|
422 |
-
is_diffusers_model = issubclass(class_obj, diffusers.ModelMixin)
|
423 |
-
|
424 |
-
if is_transformers_available():
|
425 |
-
transformers_version = version.parse(version.parse(transformers.__version__).base_version)
|
426 |
-
else:
|
427 |
-
transformers_version = "N/A"
|
428 |
-
|
429 |
-
is_transformers_model = (
|
430 |
-
is_transformers_available()
|
431 |
-
and issubclass(class_obj, PreTrainedModel)
|
432 |
-
and transformers_version >= version.parse("4.20.0")
|
433 |
-
)
|
434 |
-
|
435 |
-
# When loading a transformers model, if the device_map is None, the weights will be initialized as opposed to diffusers.
|
436 |
-
# To make default loading faster we set the `low_cpu_mem_usage=low_cpu_mem_usage` flag which is `True` by default.
|
437 |
-
# This makes sure that the weights won't be initialized which significantly speeds up loading.
|
438 |
-
if is_diffusers_model or is_transformers_model:
|
439 |
-
loading_kwargs["device_map"] = device_map
|
440 |
-
loading_kwargs["max_memory"] = max_memory
|
441 |
-
loading_kwargs["offload_folder"] = offload_folder
|
442 |
-
loading_kwargs["offload_state_dict"] = offload_state_dict
|
443 |
-
loading_kwargs["variant"] = model_variants.pop(name, None)
|
444 |
-
if from_flax:
|
445 |
-
loading_kwargs["from_flax"] = True
|
446 |
-
|
447 |
-
# the following can be deleted once the minimum required `transformers` version
|
448 |
-
# is higher than 4.27
|
449 |
-
if (
|
450 |
-
is_transformers_model
|
451 |
-
and loading_kwargs["variant"] is not None
|
452 |
-
and transformers_version < version.parse("4.27.0")
|
453 |
-
):
|
454 |
-
raise ImportError(
|
455 |
-
f"When passing `variant='{variant}'`, please make sure to upgrade your `transformers` version to at least 4.27.0.dev0"
|
456 |
-
)
|
457 |
-
elif is_transformers_model and loading_kwargs["variant"] is None:
|
458 |
-
loading_kwargs.pop("variant")
|
459 |
-
|
460 |
-
# if `from_flax` and model is transformer model, can currently not load with `low_cpu_mem_usage`
|
461 |
-
if not (from_flax and is_transformers_model):
|
462 |
-
loading_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage
|
463 |
-
else:
|
464 |
-
loading_kwargs["low_cpu_mem_usage"] = False
|
465 |
-
|
466 |
-
# check if the module is in a subdirectory
|
467 |
-
if os.path.isdir(os.path.join(cached_folder, name)):
|
468 |
-
loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
|
469 |
-
else:
|
470 |
-
# else load from the root directory
|
471 |
-
loaded_sub_model = load_method(cached_folder, **loading_kwargs)
|
472 |
-
|
473 |
-
return loaded_sub_model
|
474 |
-
|
475 |
-
|
476 |
-
class DiffusionPipeline(ConfigMixin):
|
477 |
-
r"""
|
478 |
-
Base class for all pipelines.
|
479 |
-
|
480 |
-
[`DiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and
|
481 |
-
provides methods for loading, downloading and saving models. It also includes methods to:
|
482 |
-
|
483 |
-
- move all PyTorch modules to the device of your choice
|
484 |
-
- enable/disable the progress bar for the denoising iteration
|
485 |
-
|
486 |
-
Class attributes:
|
487 |
-
|
488 |
-
- **config_name** (`str`) -- The configuration filename that stores the class and module names of all the
|
489 |
-
diffusion pipeline's components.
|
490 |
-
- **_optional_components** (`List[str]`) -- List of all optional components that don't have to be passed to the
|
491 |
-
pipeline to function (should be overridden by subclasses).
|
492 |
-
"""
|
493 |
-
config_name = "model_index.json"
|
494 |
-
_optional_components = []
|
495 |
-
_exclude_from_cpu_offload = []
|
496 |
-
_load_connected_pipes = False
|
497 |
-
_is_onnx = False
|
498 |
-
|
499 |
-
def register_modules(self, **kwargs):
|
500 |
-
# import it here to avoid circular import
|
501 |
-
from diffusers import pipelines
|
502 |
-
|
503 |
-
for name, module in kwargs.items():
|
504 |
-
# retrieve library
|
505 |
-
if module is None:
|
506 |
-
register_dict = {name: (None, None)}
|
507 |
-
else:
|
508 |
-
# register the config from the original module, not the dynamo compiled one
|
509 |
-
if is_compiled_module(module):
|
510 |
-
not_compiled_module = module._orig_mod
|
511 |
-
else:
|
512 |
-
not_compiled_module = module
|
513 |
-
|
514 |
-
library = not_compiled_module.__module__.split(".")[0]
|
515 |
-
|
516 |
-
# check if the module is a pipeline module
|
517 |
-
module_path_items = not_compiled_module.__module__.split(".")
|
518 |
-
pipeline_dir = module_path_items[-2] if len(module_path_items) > 2 else None
|
519 |
-
|
520 |
-
path = not_compiled_module.__module__.split(".")
|
521 |
-
is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir)
|
522 |
-
|
523 |
-
# if library is not in LOADABLE_CLASSES, then it is a custom module.
|
524 |
-
# Or if it's a pipeline module, then the module is inside the pipeline
|
525 |
-
# folder so we set the library to module name.
|
526 |
-
if is_pipeline_module:
|
527 |
-
library = pipeline_dir
|
528 |
-
elif library not in LOADABLE_CLASSES:
|
529 |
-
library = not_compiled_module.__module__
|
530 |
-
|
531 |
-
# retrieve class_name
|
532 |
-
class_name = not_compiled_module.__class__.__name__
|
533 |
-
|
534 |
-
register_dict = {name: (library, class_name)}
|
535 |
-
|
536 |
-
# save model index config
|
537 |
-
self.register_to_config(**register_dict)
|
538 |
-
|
539 |
-
# set models
|
540 |
-
setattr(self, name, module)
|
541 |
-
|
542 |
-
def __setattr__(self, name: str, value: Any):
|
543 |
-
if name in self.__dict__ and hasattr(self.config, name):
|
544 |
-
# We need to overwrite the config if name exists in config
|
545 |
-
if isinstance(getattr(self.config, name), (tuple, list)):
|
546 |
-
if value is not None and self.config[name][0] is not None:
|
547 |
-
class_library_tuple = (value.__module__.split(".")[0], value.__class__.__name__)
|
548 |
-
else:
|
549 |
-
class_library_tuple = (None, None)
|
550 |
-
|
551 |
-
self.register_to_config(**{name: class_library_tuple})
|
552 |
-
else:
|
553 |
-
self.register_to_config(**{name: value})
|
554 |
-
|
555 |
-
super().__setattr__(name, value)
|
556 |
-
|
557 |
-
def save_pretrained(
|
558 |
-
self,
|
559 |
-
save_directory: Union[str, os.PathLike],
|
560 |
-
safe_serialization: bool = False,
|
561 |
-
variant: Optional[str] = None,
|
562 |
-
):
|
563 |
-
"""
|
564 |
-
Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its
|
565 |
-
class implements both a save and loading method. The pipeline is easily reloaded using the
|
566 |
-
[`~DiffusionPipeline.from_pretrained`] class method.
|
567 |
-
|
568 |
-
Arguments:
|
569 |
-
save_directory (`str` or `os.PathLike`):
|
570 |
-
Directory to save a pipeline to. Will be created if it doesn't exist.
|
571 |
-
safe_serialization (`bool`, *optional*, defaults to `False`):
|
572 |
-
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
|
573 |
-
variant (`str`, *optional*):
|
574 |
-
If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
|
575 |
-
"""
|
576 |
-
model_index_dict = dict(self.config)
|
577 |
-
model_index_dict.pop("_class_name", None)
|
578 |
-
model_index_dict.pop("_diffusers_version", None)
|
579 |
-
model_index_dict.pop("_module", None)
|
580 |
-
model_index_dict.pop("_name_or_path", None)
|
581 |
-
|
582 |
-
expected_modules, optional_kwargs = self._get_signature_keys(self)
|
583 |
-
|
584 |
-
def is_saveable_module(name, value):
|
585 |
-
if name not in expected_modules:
|
586 |
-
return False
|
587 |
-
if name in self._optional_components and value[0] is None:
|
588 |
-
return False
|
589 |
-
return True
|
590 |
-
|
591 |
-
model_index_dict = {k: v for k, v in model_index_dict.items() if is_saveable_module(k, v)}
|
592 |
-
for pipeline_component_name in model_index_dict.keys():
|
593 |
-
sub_model = getattr(self, pipeline_component_name)
|
594 |
-
model_cls = sub_model.__class__
|
595 |
-
|
596 |
-
# Dynamo wraps the original model in a private class.
|
597 |
-
# I didn't find a public API to get the original class.
|
598 |
-
if is_compiled_module(sub_model):
|
599 |
-
sub_model = sub_model._orig_mod
|
600 |
-
model_cls = sub_model.__class__
|
601 |
-
|
602 |
-
save_method_name = None
|
603 |
-
# search for the model's base class in LOADABLE_CLASSES
|
604 |
-
for library_name, library_classes in LOADABLE_CLASSES.items():
|
605 |
-
if library_name in sys.modules:
|
606 |
-
library = importlib.import_module(library_name)
|
607 |
-
else:
|
608 |
-
logger.info(
|
609 |
-
f"{library_name} is not installed. Cannot save {pipeline_component_name} as {library_classes} from {library_name}"
|
610 |
-
)
|
611 |
-
|
612 |
-
for base_class, save_load_methods in library_classes.items():
|
613 |
-
class_candidate = getattr(library, base_class, None)
|
614 |
-
if class_candidate is not None and issubclass(model_cls, class_candidate):
|
615 |
-
# if we found a suitable base class in LOADABLE_CLASSES then grab its save method
|
616 |
-
save_method_name = save_load_methods[0]
|
617 |
-
break
|
618 |
-
if save_method_name is not None:
|
619 |
-
break
|
620 |
-
|
621 |
-
if save_method_name is None:
|
622 |
-
logger.warn(f"self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved.")
|
623 |
-
# make sure that unsaveable components are not tried to be loaded afterward
|
624 |
-
self.register_to_config(**{pipeline_component_name: (None, None)})
|
625 |
-
continue
|
626 |
-
|
627 |
-
save_method = getattr(sub_model, save_method_name)
|
628 |
-
|
629 |
-
# Call the save method with the argument safe_serialization only if it's supported
|
630 |
-
save_method_signature = inspect.signature(save_method)
|
631 |
-
save_method_accept_safe = "safe_serialization" in save_method_signature.parameters
|
632 |
-
save_method_accept_variant = "variant" in save_method_signature.parameters
|
633 |
-
|
634 |
-
save_kwargs = {}
|
635 |
-
if save_method_accept_safe:
|
636 |
-
save_kwargs["safe_serialization"] = safe_serialization
|
637 |
-
if save_method_accept_variant:
|
638 |
-
save_kwargs["variant"] = variant
|
639 |
-
|
640 |
-
save_method(os.path.join(save_directory, pipeline_component_name), **save_kwargs)
|
641 |
-
|
642 |
-
# finally save the config
|
643 |
-
self.save_config(save_directory)
|
644 |
-
|
645 |
-
def to(
|
646 |
-
self,
|
647 |
-
torch_device: Optional[Union[str, torch.device]] = None,
|
648 |
-
torch_dtype: Optional[torch.dtype] = None,
|
649 |
-
silence_dtype_warnings: bool = False,
|
650 |
-
):
|
651 |
-
if torch_device is None and torch_dtype is None:
|
652 |
-
return self
|
653 |
-
|
654 |
-
# throw warning if pipeline is in "offloaded"-mode but user tries to manually set to GPU.
|
655 |
-
def module_is_sequentially_offloaded(module):
|
656 |
-
if not is_accelerate_available() or is_accelerate_version("<", "0.14.0"):
|
657 |
-
return False
|
658 |
-
|
659 |
-
return hasattr(module, "_hf_hook") and not isinstance(
|
660 |
-
module._hf_hook, (accelerate.hooks.CpuOffload, accelerate.hooks.AlignDevicesHook)
|
661 |
-
)
|
662 |
-
|
663 |
-
def module_is_offloaded(module):
|
664 |
-
if not is_accelerate_available() or is_accelerate_version("<", "0.17.0.dev0"):
|
665 |
-
return False
|
666 |
-
|
667 |
-
return hasattr(module, "_hf_hook") and isinstance(module._hf_hook, accelerate.hooks.CpuOffload)
|
668 |
-
|
669 |
-
# .to("cuda") would raise an error if the pipeline is sequentially offloaded, so we raise our own to make it clearer
|
670 |
-
pipeline_is_sequentially_offloaded = any(
|
671 |
-
module_is_sequentially_offloaded(module) for _, module in self.components.items()
|
672 |
-
)
|
673 |
-
if pipeline_is_sequentially_offloaded and torch.device(torch_device).type == "cuda":
|
674 |
-
raise ValueError(
|
675 |
-
"It seems like you have activated sequential model offloading by calling `enable_sequential_cpu_offload`, but are now attempting to move the pipeline to GPU. This is not compatible with offloading. Please, move your pipeline `.to('cpu')` or consider removing the move altogether if you use sequential offloading."
|
676 |
-
)
|
677 |
-
|
678 |
-
# Display a warning in this case (the operation succeeds but the benefits are lost)
|
679 |
-
pipeline_is_offloaded = any(module_is_offloaded(module) for _, module in self.components.items())
|
680 |
-
if pipeline_is_offloaded and torch.device(torch_device).type == "cuda":
|
681 |
-
logger.warning(
|
682 |
-
f"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. It is strongly recommended against doing so as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading."
|
683 |
-
)
|
684 |
-
|
685 |
-
module_names, _ = self._get_signature_keys(self)
|
686 |
-
modules = [getattr(self, n, None) for n in module_names]
|
687 |
-
modules = [m for m in modules if isinstance(m, torch.nn.Module)]
|
688 |
-
|
689 |
-
is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded
|
690 |
-
for module in modules:
|
691 |
-
is_loaded_in_8bit = hasattr(module, "is_loaded_in_8bit") and module.is_loaded_in_8bit
|
692 |
-
|
693 |
-
if is_loaded_in_8bit and torch_dtype is not None:
|
694 |
-
logger.warning(
|
695 |
-
f"The module '{module.__class__.__name__}' has been loaded in 8bit and conversion to {torch_dtype} is not yet supported. Module is still in 8bit precision."
|
696 |
-
)
|
697 |
-
|
698 |
-
if is_loaded_in_8bit and torch_device is not None:
|
699 |
-
logger.warning(
|
700 |
-
f"The module '{module.__class__.__name__}' has been loaded in 8bit and moving it to {torch_dtype} via `.to()` is not yet supported. Module is still on {module.device}."
|
701 |
-
)
|
702 |
-
else:
|
703 |
-
module.to(torch_device, torch_dtype)
|
704 |
-
|
705 |
-
if (
|
706 |
-
module.dtype == torch.float16
|
707 |
-
and str(torch_device) in ["cpu"]
|
708 |
-
and not silence_dtype_warnings
|
709 |
-
and not is_offloaded
|
710 |
-
):
|
711 |
-
logger.warning(
|
712 |
-
"Pipelines loaded with `torch_dtype=torch.float16` cannot run with `cpu` device. It"
|
713 |
-
" is not recommended to move them to `cpu` as running them will fail. Please make"
|
714 |
-
" sure to use an accelerator to run the pipeline in inference, due to the lack of"
|
715 |
-
" support for`float16` operations on this device in PyTorch. Please, remove the"
|
716 |
-
" `torch_dtype=torch.float16` argument, or use another device for inference."
|
717 |
-
)
|
718 |
-
return self
|
719 |
-
|
720 |
-
@property
|
721 |
-
def device(self) -> torch.device:
|
722 |
-
r"""
|
723 |
-
Returns:
|
724 |
-
`torch.device`: The torch device on which the pipeline is located.
|
725 |
-
"""
|
726 |
-
module_names, _ = self._get_signature_keys(self)
|
727 |
-
modules = [getattr(self, n, None) for n in module_names]
|
728 |
-
modules = [m for m in modules if isinstance(m, torch.nn.Module)]
|
729 |
-
|
730 |
-
for module in modules:
|
731 |
-
return module.device
|
732 |
-
|
733 |
-
return torch.device("cpu")
|
734 |
-
|
735 |
-
@classmethod
|
736 |
-
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
|
737 |
-
r"""
|
738 |
-
Instantiate a PyTorch diffusion pipeline from pretrained pipeline weights.
|
739 |
-
|
740 |
-
The pipeline is set in evaluation mode (`model.eval()`) by default.
|
741 |
-
|
742 |
-
If you get the error message below, you need to finetune the weights for your downstream task:
|
743 |
-
|
744 |
-
```
|
745 |
-
Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
|
746 |
-
- conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
|
747 |
-
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
|
748 |
-
```
|
749 |
-
|
750 |
-
Parameters:
|
751 |
-
pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
|
752 |
-
Can be either:
|
753 |
-
|
754 |
-
- A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
|
755 |
-
hosted on the Hub.
|
756 |
-
- A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights
|
757 |
-
saved using
|
758 |
-
[`~DiffusionPipeline.save_pretrained`].
|
759 |
-
torch_dtype (`str` or `torch.dtype`, *optional*):
|
760 |
-
Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the
|
761 |
-
dtype is automatically derived from the model's weights.
|
762 |
-
custom_pipeline (`str`, *optional*):
|
763 |
-
|
764 |
-
<Tip warning={true}>
|
765 |
-
|
766 |
-
🧪 This is an experimental feature and may change in the future.
|
767 |
-
|
768 |
-
</Tip>
|
769 |
-
|
770 |
-
Can be either:
|
771 |
-
|
772 |
-
- A string, the *repo id* (for example `hf-internal-testing/diffusers-dummy-pipeline`) of a custom
|
773 |
-
pipeline hosted on the Hub. The repository must contain a file called pipeline.py that defines
|
774 |
-
the custom pipeline.
|
775 |
-
- A string, the *file name* of a community pipeline hosted on GitHub under
|
776 |
-
[Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file
|
777 |
-
names must match the file name and not the pipeline script (`clip_guided_stable_diffusion`
|
778 |
-
instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the
|
779 |
-
current main branch of GitHub.
|
780 |
-
- A path to a directory (`./my_pipeline_directory/`) containing a custom pipeline. The directory
|
781 |
-
must contain a file called `pipeline.py` that defines the custom pipeline.
|
782 |
-
|
783 |
-
For more information on how to load and create custom pipelines, please have a look at [Loading and
|
784 |
-
Adding Custom
|
785 |
-
Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview)
|
786 |
-
force_download (`bool`, *optional*, defaults to `False`):
|
787 |
-
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
788 |
-
cached versions if they exist.
|
789 |
-
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
790 |
-
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
|
791 |
-
is not used.
|
792 |
-
resume_download (`bool`, *optional*, defaults to `False`):
|
793 |
-
Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
|
794 |
-
incompletely downloaded files are deleted.
|
795 |
-
proxies (`Dict[str, str]`, *optional*):
|
796 |
-
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
797 |
-
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
798 |
-
output_loading_info(`bool`, *optional*, defaults to `False`):
|
799 |
-
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
|
800 |
-
local_files_only (`bool`, *optional*, defaults to `False`):
|
801 |
-
Whether to only load local model weights and configuration files or not. If set to `True`, the model
|
802 |
-
won't be downloaded from the Hub.
|
803 |
-
use_auth_token (`str` or *bool*, *optional*):
|
804 |
-
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
|
805 |
-
`diffusers-cli login` (stored in `~/.huggingface`) is used.
|
806 |
-
revision (`str`, *optional*, defaults to `"main"`):
|
807 |
-
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
|
808 |
-
allowed by Git.
|
809 |
-
custom_revision (`str`, *optional*, defaults to `"main"`):
|
810 |
-
The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
|
811 |
-
`revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a
|
812 |
-
custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub.
|
813 |
-
mirror (`str`, *optional*):
|
814 |
-
Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not
|
815 |
-
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
|
816 |
-
information.
|
817 |
-
device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
|
818 |
-
A map that specifies where each submodule should go. It doesn’t need to be defined for each
|
819 |
-
parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
|
820 |
-
same device.
|
821 |
-
|
822 |
-
Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For
|
823 |
-
more information about each option see [designing a device
|
824 |
-
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
|
825 |
-
max_memory (`Dict`, *optional*):
|
826 |
-
A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
|
827 |
-
each GPU and the available CPU RAM if unset.
|
828 |
-
offload_folder (`str` or `os.PathLike`, *optional*):
|
829 |
-
The path to offload weights if device_map contains the value `"disk"`.
|
830 |
-
offload_state_dict (`bool`, *optional*):
|
831 |
-
If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
|
832 |
-
the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
|
833 |
-
when there is some disk offload.
|
834 |
-
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
|
835 |
-
Speed up model loading only loading the pretrained weights and not initializing the weights. This also
|
836 |
-
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
|
837 |
-
Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
|
838 |
-
argument to `True` will raise an error.
|
839 |
-
use_safetensors (`bool`, *optional*, defaults to `None`):
|
840 |
-
If set to `None`, the safetensors weights are downloaded if they're available **and** if the
|
841 |
-
safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
|
842 |
-
weights. If set to `False`, safetensors weights are not loaded.
|
843 |
-
use_onnx (`bool`, *optional*, defaults to `None`):
|
844 |
-
If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights
|
845 |
-
will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is
|
846 |
-
`False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending
|
847 |
-
with `.onnx` and `.pb`.
|
848 |
-
kwargs (remaining dictionary of keyword arguments, *optional*):
|
849 |
-
Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
|
850 |
-
class). The overwritten components are passed directly to the pipelines `__init__` method. See example
|
851 |
-
below for more information.
|
852 |
-
variant (`str`, *optional*):
|
853 |
-
Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when
|
854 |
-
loading `from_flax`.
|
855 |
-
|
856 |
-
<Tip>
|
857 |
-
|
858 |
-
To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
|
859 |
-
`huggingface-cli login`.
|
860 |
-
|
861 |
-
</Tip>
|
862 |
-
|
863 |
-
Examples:
|
864 |
-
|
865 |
-
```py
|
866 |
-
>>> from diffusers import DiffusionPipeline
|
867 |
-
|
868 |
-
>>> # Download pipeline from huggingface.co and cache.
|
869 |
-
>>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
|
870 |
-
|
871 |
-
>>> # Download pipeline that requires an authorization token
|
872 |
-
>>> # For more information on access tokens, please refer to this section
|
873 |
-
>>> # of the documentation](https://huggingface.co/docs/hub/security-tokens)
|
874 |
-
>>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
|
875 |
-
|
876 |
-
>>> # Use a different scheduler
|
877 |
-
>>> from diffusers import LMSDiscreteScheduler
|
878 |
-
|
879 |
-
>>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)
|
880 |
-
>>> pipeline.scheduler = scheduler
|
881 |
-
```
|
882 |
-
"""
|
883 |
-
cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
|
884 |
-
resume_download = kwargs.pop("resume_download", False)
|
885 |
-
force_download = kwargs.pop("force_download", False)
|
886 |
-
proxies = kwargs.pop("proxies", None)
|
887 |
-
local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
|
888 |
-
use_auth_token = kwargs.pop("use_auth_token", None)
|
889 |
-
revision = kwargs.pop("revision", None)
|
890 |
-
from_flax = kwargs.pop("from_flax", False)
|
891 |
-
torch_dtype = kwargs.pop("torch_dtype", None)
|
892 |
-
custom_pipeline = kwargs.pop("custom_pipeline", None)
|
893 |
-
custom_revision = kwargs.pop("custom_revision", None)
|
894 |
-
provider = kwargs.pop("provider", None)
|
895 |
-
sess_options = kwargs.pop("sess_options", None)
|
896 |
-
device_map = kwargs.pop("device_map", None)
|
897 |
-
max_memory = kwargs.pop("max_memory", None)
|
898 |
-
offload_folder = kwargs.pop("offload_folder", None)
|
899 |
-
offload_state_dict = kwargs.pop("offload_state_dict", False)
|
900 |
-
low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
|
901 |
-
variant = kwargs.pop("variant", None)
|
902 |
-
use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)
|
903 |
-
load_connected_pipeline = kwargs.pop("load_connected_pipeline", False)
|
904 |
-
|
905 |
-
# 1. Download the checkpoints and configs
|
906 |
-
# use snapshot download here to get it working from from_pretrained
|
907 |
-
if not os.path.isdir(pretrained_model_name_or_path):
|
908 |
-
cached_folder = cls.download(
|
909 |
-
pretrained_model_name_or_path,
|
910 |
-
cache_dir=cache_dir,
|
911 |
-
resume_download=resume_download,
|
912 |
-
force_download=force_download,
|
913 |
-
proxies=proxies,
|
914 |
-
local_files_only=local_files_only,
|
915 |
-
use_auth_token=use_auth_token,
|
916 |
-
revision=revision,
|
917 |
-
from_flax=from_flax,
|
918 |
-
use_safetensors=use_safetensors,
|
919 |
-
custom_pipeline=custom_pipeline,
|
920 |
-
custom_revision=custom_revision,
|
921 |
-
variant=variant,
|
922 |
-
load_connected_pipeline=load_connected_pipeline,
|
923 |
-
**kwargs,
|
924 |
-
)
|
925 |
-
else:
|
926 |
-
cached_folder = pretrained_model_name_or_path
|
927 |
-
|
928 |
-
config_dict = cls.load_config(cached_folder)
|
929 |
-
|
930 |
-
# pop out "_ignore_files" as it is only needed for download
|
931 |
-
config_dict.pop("_ignore_files", None)
|
932 |
-
|
933 |
-
# 2. Define which model components should load variants
|
934 |
-
# We retrieve the information by matching whether variant
|
935 |
-
# model checkpoints exist in the subfolders
|
936 |
-
model_variants = {}
|
937 |
-
if variant is not None:
|
938 |
-
for folder in os.listdir(cached_folder):
|
939 |
-
folder_path = os.path.join(cached_folder, folder)
|
940 |
-
is_folder = os.path.isdir(folder_path) and folder in config_dict
|
941 |
-
variant_exists = is_folder and any(
|
942 |
-
p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)
|
943 |
-
)
|
944 |
-
if variant_exists:
|
945 |
-
model_variants[folder] = variant
|
946 |
-
|
947 |
-
# 3. Load the pipeline class, if using custom module then load it from the hub
|
948 |
-
# if we load from explicit class, let's use it
|
949 |
-
pipeline_class = _get_pipeline_class(
|
950 |
-
cls,
|
951 |
-
config_dict,
|
952 |
-
load_connected_pipeline=load_connected_pipeline,
|
953 |
-
custom_pipeline=custom_pipeline,
|
954 |
-
cache_dir=cache_dir,
|
955 |
-
revision=custom_revision,
|
956 |
-
)
|
957 |
-
|
958 |
-
# DEPRECATED: To be removed in 1.0.0
|
959 |
-
if pipeline_class.__name__ == "StableDiffusionInpaintPipeline" and version.parse(
|
960 |
-
version.parse(config_dict["_diffusers_version"]).base_version
|
961 |
-
) <= version.parse("0.5.1"):
|
962 |
-
from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy
|
963 |
-
|
964 |
-
pipeline_class = StableDiffusionInpaintPipelineLegacy
|
965 |
-
|
966 |
-
deprecation_message = (
|
967 |
-
"You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the"
|
968 |
-
f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For"
|
969 |
-
" better inpainting results, we strongly suggest using Stable Diffusion's official inpainting"
|
970 |
-
" checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your"
|
971 |
-
f" checkpoint {pretrained_model_name_or_path} to the format of"
|
972 |
-
" https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain"
|
973 |
-
" the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0."
|
974 |
-
)
|
975 |
-
deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False)
|
976 |
-
|
977 |
-
# 4. Define expected modules given pipeline signature
|
978 |
-
# and define non-None initialized modules (=`init_kwargs`)
|
979 |
-
|
980 |
-
# some modules can be passed directly to the init
|
981 |
-
# in this case they are already instantiated in `kwargs`
|
982 |
-
# extract them here
|
983 |
-
expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class)
|
984 |
-
passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}
|
985 |
-
passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}
|
986 |
-
|
987 |
-
init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)
|
988 |
-
|
989 |
-
# define init kwargs
|
990 |
-
init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict}
|
991 |
-
init_kwargs = {**init_kwargs, **passed_pipe_kwargs}
|
992 |
-
|
993 |
-
# remove `null` components
|
994 |
-
def load_module(name, value):
|
995 |
-
if value[0] is None:
|
996 |
-
return False
|
997 |
-
if name in passed_class_obj and passed_class_obj[name] is None:
|
998 |
-
return False
|
999 |
-
return True
|
1000 |
-
|
1001 |
-
init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)}
|
1002 |
-
|
1003 |
-
# Special case: safety_checker must be loaded separately when using `from_flax`
|
1004 |
-
if from_flax and "safety_checker" in init_dict and "safety_checker" not in passed_class_obj:
|
1005 |
-
raise NotImplementedError(
|
1006 |
-
"The safety checker cannot be automatically loaded when loading weights `from_flax`."
|
1007 |
-
" Please, pass `safety_checker=None` to `from_pretrained`, and load the safety checker"
|
1008 |
-
" separately if you need it."
|
1009 |
-
)
|
1010 |
-
|
1011 |
-
# 5. Throw nice warnings / errors for fast accelerate loading
|
1012 |
-
if len(unused_kwargs) > 0:
|
1013 |
-
logger.warning(
|
1014 |
-
f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored."
|
1015 |
-
)
|
1016 |
-
|
1017 |
-
if low_cpu_mem_usage and not is_accelerate_available():
|
1018 |
-
low_cpu_mem_usage = False
|
1019 |
-
logger.warning(
|
1020 |
-
"Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
|
1021 |
-
" environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
|
1022 |
-
" `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
|
1023 |
-
" install accelerate\n```\n."
|
1024 |
-
)
|
1025 |
-
|
1026 |
-
if device_map is not None and not is_torch_version(">=", "1.9.0"):
|
1027 |
-
raise NotImplementedError(
|
1028 |
-
"Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set"
|
1029 |
-
" `device_map=None`."
|
1030 |
-
)
|
1031 |
-
|
1032 |
-
if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
|
1033 |
-
raise NotImplementedError(
|
1034 |
-
"Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
|
1035 |
-
" `low_cpu_mem_usage=False`."
|
1036 |
-
)
|
1037 |
-
|
1038 |
-
if low_cpu_mem_usage is False and device_map is not None:
|
1039 |
-
raise ValueError(
|
1040 |
-
f"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and"
|
1041 |
-
" dispatching. Please make sure to set `low_cpu_mem_usage=True`."
|
1042 |
-
)
|
1043 |
-
|
1044 |
-
# import it here to avoid circular import
|
1045 |
-
from diffusers import pipelines
|
1046 |
-
|
1047 |
-
# 6. Load each module in the pipeline
|
1048 |
-
for name, (library_name, class_name) in tqdm(init_dict.items(), desc="Loading pipeline components..."):
|
1049 |
-
# 6.1 - now that JAX/Flax is an official framework of the library, we might load from Flax names
|
1050 |
-
if class_name.startswith("Flax"):
|
1051 |
-
class_name = class_name[4:]
|
1052 |
-
|
1053 |
-
# 6.2 Define all importable classes
|
1054 |
-
is_pipeline_module = hasattr(pipelines, library_name)
|
1055 |
-
importable_classes = ALL_IMPORTABLE_CLASSES
|
1056 |
-
loaded_sub_model = None
|
1057 |
-
|
1058 |
-
# 6.3 Use passed sub model or load class_name from library_name
|
1059 |
-
if name in passed_class_obj:
|
1060 |
-
# if the model is in a pipeline module, then we load it from the pipeline
|
1061 |
-
# check that passed_class_obj has correct parent class
|
1062 |
-
maybe_raise_or_warn(
|
1063 |
-
library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module
|
1064 |
-
)
|
1065 |
-
|
1066 |
-
loaded_sub_model = passed_class_obj[name]
|
1067 |
-
else:
|
1068 |
-
# load sub model
|
1069 |
-
loaded_sub_model = load_sub_model(
|
1070 |
-
library_name=library_name,
|
1071 |
-
class_name=class_name,
|
1072 |
-
importable_classes=importable_classes,
|
1073 |
-
pipelines=pipelines,
|
1074 |
-
is_pipeline_module=is_pipeline_module,
|
1075 |
-
pipeline_class=pipeline_class,
|
1076 |
-
torch_dtype=torch_dtype,
|
1077 |
-
provider=provider,
|
1078 |
-
sess_options=sess_options,
|
1079 |
-
device_map=device_map,
|
1080 |
-
max_memory=max_memory,
|
1081 |
-
offload_folder=offload_folder,
|
1082 |
-
offload_state_dict=offload_state_dict,
|
1083 |
-
model_variants=model_variants,
|
1084 |
-
name=name,
|
1085 |
-
from_flax=from_flax,
|
1086 |
-
variant=variant,
|
1087 |
-
low_cpu_mem_usage=low_cpu_mem_usage,
|
1088 |
-
cached_folder=cached_folder,
|
1089 |
-
)
|
1090 |
-
logger.info(
|
1091 |
-
f"Loaded {name} as {class_name} from `{name}` subfolder of {pretrained_model_name_or_path}."
|
1092 |
-
)
|
1093 |
-
|
1094 |
-
init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...)
|
1095 |
-
|
1096 |
-
if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, "README.md")):
|
1097 |
-
modelcard = ModelCard.load(os.path.join(cached_folder, "README.md"))
|
1098 |
-
connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS}
|
1099 |
-
load_kwargs = {
|
1100 |
-
"cache_dir": cache_dir,
|
1101 |
-
"resume_download": resume_download,
|
1102 |
-
"force_download": force_download,
|
1103 |
-
"proxies": proxies,
|
1104 |
-
"local_files_only": local_files_only,
|
1105 |
-
"use_auth_token": use_auth_token,
|
1106 |
-
"revision": revision,
|
1107 |
-
"torch_dtype": torch_dtype,
|
1108 |
-
"custom_pipeline": custom_pipeline,
|
1109 |
-
"custom_revision": custom_revision,
|
1110 |
-
"provider": provider,
|
1111 |
-
"sess_options": sess_options,
|
1112 |
-
"device_map": device_map,
|
1113 |
-
"max_memory": max_memory,
|
1114 |
-
"offload_folder": offload_folder,
|
1115 |
-
"offload_state_dict": offload_state_dict,
|
1116 |
-
"low_cpu_mem_usage": low_cpu_mem_usage,
|
1117 |
-
"variant": variant,
|
1118 |
-
"use_safetensors": use_safetensors,
|
1119 |
-
}
|
1120 |
-
connected_pipes = {
|
1121 |
-
prefix: DiffusionPipeline.from_pretrained(repo_id, **load_kwargs.copy())
|
1122 |
-
for prefix, repo_id in connected_pipes.items()
|
1123 |
-
if repo_id is not None
|
1124 |
-
}
|
1125 |
-
|
1126 |
-
for prefix, connected_pipe in connected_pipes.items():
|
1127 |
-
# add connected pipes to `init_kwargs` with <prefix>_<component_name>, e.g. "prior_text_encoder"
|
1128 |
-
init_kwargs.update(
|
1129 |
-
{"_".join([prefix, name]): component for name, component in connected_pipe.components.items()}
|
1130 |
-
)
|
1131 |
-
|
1132 |
-
# 7. Potentially add passed objects if expected
|
1133 |
-
missing_modules = set(expected_modules) - set(init_kwargs.keys())
|
1134 |
-
passed_modules = list(passed_class_obj.keys())
|
1135 |
-
optional_modules = pipeline_class._optional_components
|
1136 |
-
if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules):
|
1137 |
-
for module in missing_modules:
|
1138 |
-
init_kwargs[module] = passed_class_obj.get(module, None)
|
1139 |
-
elif len(missing_modules) > 0:
|
1140 |
-
passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs
|
1141 |
-
raise ValueError(
|
1142 |
-
f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed."
|
1143 |
-
)
|
1144 |
-
|
1145 |
-
# 8. Instantiate the pipeline
|
1146 |
-
model = pipeline_class(**init_kwargs)
|
1147 |
-
|
1148 |
-
# 9. Save where the model was instantiated from
|
1149 |
-
model.register_to_config(_name_or_path=pretrained_model_name_or_path)
|
1150 |
-
return model
|
1151 |
-
|
1152 |
-
@property
|
1153 |
-
def name_or_path(self) -> str:
|
1154 |
-
return getattr(self.config, "_name_or_path", None)
|
1155 |
-
|
1156 |
-
@property
|
1157 |
-
def _execution_device(self):
|
1158 |
-
r"""
|
1159 |
-
Returns the device on which the pipeline's models will be executed. After calling
|
1160 |
-
[`~DiffusionPipeline.enable_sequential_cpu_offload`] the execution device can only be inferred from
|
1161 |
-
Accelerate's module hooks.
|
1162 |
-
"""
|
1163 |
-
for name, model in self.components.items():
|
1164 |
-
if not isinstance(model, torch.nn.Module) or name in self._exclude_from_cpu_offload:
|
1165 |
-
continue
|
1166 |
-
|
1167 |
-
if not hasattr(model, "_hf_hook"):
|
1168 |
-
return self.device
|
1169 |
-
for module in model.modules():
|
1170 |
-
if (
|
1171 |
-
hasattr(module, "_hf_hook")
|
1172 |
-
and hasattr(module._hf_hook, "execution_device")
|
1173 |
-
and module._hf_hook.execution_device is not None
|
1174 |
-
):
|
1175 |
-
return torch.device(module._hf_hook.execution_device)
|
1176 |
-
return self.device
|
1177 |
-
|
1178 |
-
def enable_sequential_cpu_offload(self, gpu_id: int = 0, device: Union[torch.device, str] = "cuda"):
|
1179 |
-
r"""
|
1180 |
-
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
|
1181 |
-
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
|
1182 |
-
`torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.
|
1183 |
-
Note that offloading happens on a submodule basis. Memory savings are higher than with
|
1184 |
-
`enable_model_cpu_offload`, but performance is lower.
|
1185 |
-
"""
|
1186 |
-
if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
|
1187 |
-
from accelerate import cpu_offload
|
1188 |
-
else:
|
1189 |
-
raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
|
1190 |
-
|
1191 |
-
if device == "cuda":
|
1192 |
-
device = torch.device(f"{device}:{gpu_id}")
|
1193 |
-
|
1194 |
-
if self.device.type != "cpu":
|
1195 |
-
self.to("cpu", silence_dtype_warnings=True)
|
1196 |
-
device_mod = getattr(torch, self.device.type, None)
|
1197 |
-
if hasattr(device_mod, "empty_cache") and device_mod.is_available():
|
1198 |
-
device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
|
1199 |
-
|
1200 |
-
for name, model in self.components.items():
|
1201 |
-
if not isinstance(model, torch.nn.Module):
|
1202 |
-
continue
|
1203 |
-
|
1204 |
-
if name in self._exclude_from_cpu_offload:
|
1205 |
-
model.to(device)
|
1206 |
-
else:
|
1207 |
-
# make sure to offload buffers if not all high level weights
|
1208 |
-
# are of type nn.Module
|
1209 |
-
offload_buffers = len(model._parameters) > 0
|
1210 |
-
cpu_offload(model, device, offload_buffers=offload_buffers)
|
1211 |
-
|
1212 |
-
@classmethod
|
1213 |
-
def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
|
1214 |
-
r"""
|
1215 |
-
Download and cache a PyTorch diffusion pipeline from pretrained pipeline weights.
|
1216 |
-
|
1217 |
-
Parameters:
|
1218 |
-
pretrained_model_name (`str` or `os.PathLike`, *optional*):
|
1219 |
-
A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
|
1220 |
-
hosted on the Hub.
|
1221 |
-
custom_pipeline (`str`, *optional*):
|
1222 |
-
Can be either:
|
1223 |
-
|
1224 |
-
- A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained
|
1225 |
-
pipeline hosted on the Hub. The repository must contain a file called `pipeline.py` that defines
|
1226 |
-
the custom pipeline.
|
1227 |
-
|
1228 |
-
- A string, the *file name* of a community pipeline hosted on GitHub under
|
1229 |
-
[Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file
|
1230 |
-
names must match the file name and not the pipeline script (`clip_guided_stable_diffusion`
|
1231 |
-
instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the
|
1232 |
-
current `main` branch of GitHub.
|
1233 |
-
|
1234 |
-
- A path to a *directory* (`./my_pipeline_directory/`) containing a custom pipeline. The directory
|
1235 |
-
must contain a file called `pipeline.py` that defines the custom pipeline.
|
1236 |
-
|
1237 |
-
<Tip warning={true}>
|
1238 |
-
|
1239 |
-
🧪 This is an experimental feature and may change in the future.
|
1240 |
-
|
1241 |
-
</Tip>
|
1242 |
-
|
1243 |
-
For more information on how to load and create custom pipelines, take a look at [How to contribute a
|
1244 |
-
community pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/contribute_pipeline).
|
1245 |
-
|
1246 |
-
force_download (`bool`, *optional*, defaults to `False`):
|
1247 |
-
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
1248 |
-
cached versions if they exist.
|
1249 |
-
resume_download (`bool`, *optional*, defaults to `False`):
|
1250 |
-
Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
|
1251 |
-
incompletely downloaded files are deleted.
|
1252 |
-
proxies (`Dict[str, str]`, *optional*):
|
1253 |
-
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
1254 |
-
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
1255 |
-
output_loading_info(`bool`, *optional*, defaults to `False`):
|
1256 |
-
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
|
1257 |
-
local_files_only (`bool`, *optional*, defaults to `False`):
|
1258 |
-
Whether to only load local model weights and configuration files or not. If set to `True`, the model
|
1259 |
-
won't be downloaded from the Hub.
|
1260 |
-
use_auth_token (`str` or *bool*, *optional*):
|
1261 |
-
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
|
1262 |
-
`diffusers-cli login` (stored in `~/.huggingface`) is used.
|
1263 |
-
revision (`str`, *optional*, defaults to `"main"`):
|
1264 |
-
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
|
1265 |
-
allowed by Git.
|
1266 |
-
custom_revision (`str`, *optional*, defaults to `"main"`):
|
1267 |
-
The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
|
1268 |
-
`revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a
|
1269 |
-
custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub.
|
1270 |
-
mirror (`str`, *optional*):
|
1271 |
-
Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
|
1272 |
-
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
|
1273 |
-
information.
|
1274 |
-
variant (`str`, *optional*):
|
1275 |
-
Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when
|
1276 |
-
loading `from_flax`.
|
1277 |
-
use_safetensors (`bool`, *optional*, defaults to `None`):
|
1278 |
-
If set to `None`, the safetensors weights are downloaded if they're available **and** if the
|
1279 |
-
safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
|
1280 |
-
weights. If set to `False`, safetensors weights are not loaded.
|
1281 |
-
use_onnx (`bool`, *optional*, defaults to `False`):
|
1282 |
-
If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights
|
1283 |
-
will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is
|
1284 |
-
`False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending
|
1285 |
-
with `.onnx` and `.pb`.
|
1286 |
-
|
1287 |
-
Returns:
|
1288 |
-
`os.PathLike`:
|
1289 |
-
A path to the downloaded pipeline.
|
1290 |
-
|
1291 |
-
<Tip>
|
1292 |
-
|
1293 |
-
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
|
1294 |
-
`huggingface-cli login`.
|
1295 |
-
|
1296 |
-
</Tip>
|
1297 |
-
|
1298 |
-
"""
|
1299 |
-
cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
|
1300 |
-
resume_download = kwargs.pop("resume_download", False)
|
1301 |
-
force_download = kwargs.pop("force_download", False)
|
1302 |
-
proxies = kwargs.pop("proxies", None)
|
1303 |
-
local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
|
1304 |
-
use_auth_token = kwargs.pop("use_auth_token", None)
|
1305 |
-
revision = kwargs.pop("revision", None)
|
1306 |
-
from_flax = kwargs.pop("from_flax", False)
|
1307 |
-
custom_pipeline = kwargs.pop("custom_pipeline", None)
|
1308 |
-
custom_revision = kwargs.pop("custom_revision", None)
|
1309 |
-
variant = kwargs.pop("variant", None)
|
1310 |
-
use_safetensors = kwargs.pop("use_safetensors", None)
|
1311 |
-
use_onnx = kwargs.pop("use_onnx", None)
|
1312 |
-
load_connected_pipeline = kwargs.pop("load_connected_pipeline", False)
|
1313 |
-
|
1314 |
-
if use_safetensors and not is_safetensors_available():
|
1315 |
-
raise ValueError(
|
1316 |
-
"`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors"
|
1317 |
-
)
|
1318 |
-
|
1319 |
-
allow_pickle = False
|
1320 |
-
if use_safetensors is None:
|
1321 |
-
use_safetensors = is_safetensors_available()
|
1322 |
-
allow_pickle = True
|
1323 |
-
|
1324 |
-
allow_patterns = None
|
1325 |
-
ignore_patterns = None
|
1326 |
-
|
1327 |
-
model_info_call_error: Optional[Exception] = None
|
1328 |
-
if not local_files_only:
|
1329 |
-
try:
|
1330 |
-
info = model_info(
|
1331 |
-
pretrained_model_name,
|
1332 |
-
use_auth_token=use_auth_token,
|
1333 |
-
revision=revision,
|
1334 |
-
)
|
1335 |
-
except HTTPError as e:
|
1336 |
-
logger.warn(f"Couldn't connect to the Hub: {e}.\nWill try to load from local cache.")
|
1337 |
-
local_files_only = True
|
1338 |
-
model_info_call_error = e # save error to reraise it if model is not cached locally
|
1339 |
-
|
1340 |
-
if not local_files_only:
|
1341 |
-
config_file = hf_hub_download(
|
1342 |
-
pretrained_model_name,
|
1343 |
-
cls.config_name,
|
1344 |
-
cache_dir=cache_dir,
|
1345 |
-
revision=revision,
|
1346 |
-
proxies=proxies,
|
1347 |
-
force_download=force_download,
|
1348 |
-
resume_download=resume_download,
|
1349 |
-
use_auth_token=use_auth_token,
|
1350 |
-
)
|
1351 |
-
|
1352 |
-
config_dict = cls._dict_from_json_file(config_file)
|
1353 |
-
|
1354 |
-
ignore_filenames = config_dict.pop("_ignore_files", [])
|
1355 |
-
|
1356 |
-
# retrieve all folder_names that contain relevant files
|
1357 |
-
folder_names = [k for k, v in config_dict.items() if isinstance(v, list)]
|
1358 |
-
|
1359 |
-
filenames = {sibling.rfilename for sibling in info.siblings}
|
1360 |
-
model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)
|
1361 |
-
|
1362 |
-
if len(variant_filenames) == 0 and variant is not None:
|
1363 |
-
deprecation_message = (
|
1364 |
-
f"You are trying to load the model files of the `variant={variant}`, but no such modeling files are available."
|
1365 |
-
f"The default model files: {model_filenames} will be loaded instead. Make sure to not load from `variant={variant}`"
|
1366 |
-
"if such variant modeling files are not available. Doing so will lead to an error in v0.22.0 as defaulting to non-variant"
|
1367 |
-
"modeling files is deprecated."
|
1368 |
-
)
|
1369 |
-
deprecate("no variant default", "0.22.0", deprecation_message, standard_warn=False)
|
1370 |
-
|
1371 |
-
# remove ignored filenames
|
1372 |
-
model_filenames = set(model_filenames) - set(ignore_filenames)
|
1373 |
-
variant_filenames = set(variant_filenames) - set(ignore_filenames)
|
1374 |
-
|
1375 |
-
# if the whole pipeline is cached we don't have to ping the Hub
|
1376 |
-
if revision in DEPRECATED_REVISION_ARGS and version.parse(
|
1377 |
-
version.parse(__version__).base_version
|
1378 |
-
) >= version.parse("0.20.0"):
|
1379 |
-
warn_deprecated_model_variant(
|
1380 |
-
pretrained_model_name, use_auth_token, variant, revision, model_filenames
|
1381 |
-
)
|
1382 |
-
|
1383 |
-
model_folder_names = {os.path.split(f)[0] for f in model_filenames if os.path.split(f)[0] in folder_names}
|
1384 |
-
|
1385 |
-
# all filenames compatible with variant will be added
|
1386 |
-
allow_patterns = list(model_filenames)
|
1387 |
-
|
1388 |
-
# allow all patterns from non-model folders
|
1389 |
-
# this enables downloading schedulers, tokenizers, ...
|
1390 |
-
allow_patterns += [f"{k}/*" for k in folder_names if k not in model_folder_names]
|
1391 |
-
# also allow downloading config.json files with the model
|
1392 |
-
allow_patterns += [os.path.join(k, "config.json") for k in model_folder_names]
|
1393 |
-
|
1394 |
-
allow_patterns += [
|
1395 |
-
SCHEDULER_CONFIG_NAME,
|
1396 |
-
CONFIG_NAME,
|
1397 |
-
cls.config_name,
|
1398 |
-
CUSTOM_PIPELINE_FILE_NAME,
|
1399 |
-
]
|
1400 |
-
|
1401 |
-
# retrieve passed components that should not be downloaded
|
1402 |
-
pipeline_class = _get_pipeline_class(
|
1403 |
-
cls,
|
1404 |
-
config_dict,
|
1405 |
-
load_connected_pipeline=load_connected_pipeline,
|
1406 |
-
custom_pipeline=custom_pipeline,
|
1407 |
-
cache_dir=cache_dir,
|
1408 |
-
revision=custom_revision,
|
1409 |
-
)
|
1410 |
-
expected_components, _ = cls._get_signature_keys(pipeline_class)
|
1411 |
-
passed_components = [k for k in expected_components if k in kwargs]
|
1412 |
-
|
1413 |
-
if (
|
1414 |
-
use_safetensors
|
1415 |
-
and not allow_pickle
|
1416 |
-
and not is_safetensors_compatible(
|
1417 |
-
model_filenames, variant=variant, passed_components=passed_components
|
1418 |
-
)
|
1419 |
-
):
|
1420 |
-
raise EnvironmentError(
|
1421 |
-
f"Could not found the necessary `safetensors` weights in {model_filenames} (variant={variant})"
|
1422 |
-
)
|
1423 |
-
if from_flax:
|
1424 |
-
ignore_patterns = ["*.bin", "*.safetensors", "*.onnx", "*.pb"]
|
1425 |
-
elif use_safetensors and is_safetensors_compatible(
|
1426 |
-
model_filenames, variant=variant, passed_components=passed_components
|
1427 |
-
):
|
1428 |
-
ignore_patterns = ["*.bin", "*.msgpack"]
|
1429 |
-
|
1430 |
-
use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx
|
1431 |
-
if not use_onnx:
|
1432 |
-
ignore_patterns += ["*.onnx", "*.pb"]
|
1433 |
-
|
1434 |
-
safetensors_variant_filenames = {f for f in variant_filenames if f.endswith(".safetensors")}
|
1435 |
-
safetensors_model_filenames = {f for f in model_filenames if f.endswith(".safetensors")}
|
1436 |
-
if (
|
1437 |
-
len(safetensors_variant_filenames) > 0
|
1438 |
-
and safetensors_model_filenames != safetensors_variant_filenames
|
1439 |
-
):
|
1440 |
-
logger.warn(
|
1441 |
-
f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(safetensors_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}\nIf this behavior is not expected, please check your folder structure."
|
1442 |
-
)
|
1443 |
-
else:
|
1444 |
-
ignore_patterns = ["*.safetensors", "*.msgpack"]
|
1445 |
-
|
1446 |
-
use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx
|
1447 |
-
if not use_onnx:
|
1448 |
-
ignore_patterns += ["*.onnx", "*.pb"]
|
1449 |
-
|
1450 |
-
bin_variant_filenames = {f for f in variant_filenames if f.endswith(".bin")}
|
1451 |
-
bin_model_filenames = {f for f in model_filenames if f.endswith(".bin")}
|
1452 |
-
if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames:
|
1453 |
-
logger.warn(
|
1454 |
-
f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(bin_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(bin_model_filenames - bin_variant_filenames)}\nIf this behavior is not expected, please check your folder structure."
|
1455 |
-
)
|
1456 |
-
|
1457 |
-
# Don't download any objects that are passed
|
1458 |
-
allow_patterns = [
|
1459 |
-
p for p in allow_patterns if not (len(p.split("/")) == 2 and p.split("/")[0] in passed_components)
|
1460 |
-
]
|
1461 |
-
|
1462 |
-
if pipeline_class._load_connected_pipes:
|
1463 |
-
allow_patterns.append("README.md")
|
1464 |
-
|
1465 |
-
# Don't download index files of forbidden patterns either
|
1466 |
-
ignore_patterns = ignore_patterns + [f"{i}.index.*json" for i in ignore_patterns]
|
1467 |
-
|
1468 |
-
re_ignore_pattern = [re.compile(fnmatch.translate(p)) for p in ignore_patterns]
|
1469 |
-
re_allow_pattern = [re.compile(fnmatch.translate(p)) for p in allow_patterns]
|
1470 |
-
|
1471 |
-
expected_files = [f for f in filenames if not any(p.match(f) for p in re_ignore_pattern)]
|
1472 |
-
expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)]
|
1473 |
-
|
1474 |
-
snapshot_folder = Path(config_file).parent
|
1475 |
-
pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files)
|
1476 |
-
|
1477 |
-
if pipeline_is_cached and not force_download:
|
1478 |
-
# if the pipeline is cached, we can directly return it
|
1479 |
-
# else call snapshot_download
|
1480 |
-
return snapshot_folder
|
1481 |
-
|
1482 |
-
user_agent = {"pipeline_class": cls.__name__}
|
1483 |
-
if custom_pipeline is not None and not custom_pipeline.endswith(".py"):
|
1484 |
-
user_agent["custom_pipeline"] = custom_pipeline
|
1485 |
-
|
1486 |
-
# download all allow_patterns - ignore_patterns
|
1487 |
-
try:
|
1488 |
-
cached_folder = snapshot_download(
|
1489 |
-
pretrained_model_name,
|
1490 |
-
cache_dir=cache_dir,
|
1491 |
-
resume_download=resume_download,
|
1492 |
-
proxies=proxies,
|
1493 |
-
local_files_only=local_files_only,
|
1494 |
-
use_auth_token=use_auth_token,
|
1495 |
-
revision=revision,
|
1496 |
-
allow_patterns=allow_patterns,
|
1497 |
-
ignore_patterns=ignore_patterns,
|
1498 |
-
user_agent=user_agent,
|
1499 |
-
)
|
1500 |
-
|
1501 |
-
# retrieve pipeline class from local file
|
1502 |
-
cls_name = cls.load_config(os.path.join(cached_folder, "model_index.json")).get("_class_name", None)
|
1503 |
-
pipeline_class = getattr(diffusers, cls_name, None)
|
1504 |
-
|
1505 |
-
if pipeline_class is not None and pipeline_class._load_connected_pipes:
|
1506 |
-
modelcard = ModelCard.load(os.path.join(cached_folder, "README.md"))
|
1507 |
-
connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], [])
|
1508 |
-
for connected_pipe_repo_id in connected_pipes:
|
1509 |
-
download_kwargs = {
|
1510 |
-
"cache_dir": cache_dir,
|
1511 |
-
"resume_download": resume_download,
|
1512 |
-
"force_download": force_download,
|
1513 |
-
"proxies": proxies,
|
1514 |
-
"local_files_only": local_files_only,
|
1515 |
-
"use_auth_token": use_auth_token,
|
1516 |
-
"variant": variant,
|
1517 |
-
"use_safetensors": use_safetensors,
|
1518 |
-
}
|
1519 |
-
DiffusionPipeline.download(connected_pipe_repo_id, **download_kwargs)
|
1520 |
-
|
1521 |
-
return cached_folder
|
1522 |
-
|
1523 |
-
except FileNotFoundError:
|
1524 |
-
# Means we tried to load pipeline with `local_files_only=True` but the files have not been found in local cache.
|
1525 |
-
# This can happen in two cases:
|
1526 |
-
# 1. If the user passed `local_files_only=True` => we raise the error directly
|
1527 |
-
# 2. If we forced `local_files_only=True` when `model_info` failed => we raise the initial error
|
1528 |
-
if model_info_call_error is None:
|
1529 |
-
# 1. user passed `local_files_only=True`
|
1530 |
-
raise
|
1531 |
-
else:
|
1532 |
-
# 2. we forced `local_files_only=True` when `model_info` failed
|
1533 |
-
raise EnvironmentError(
|
1534 |
-
f"Cannot load model {pretrained_model_name}: model is not cached locally and an error occured"
|
1535 |
-
" while trying to fetch metadata from the Hub. Please check out the root cause in the stacktrace"
|
1536 |
-
" above."
|
1537 |
-
) from model_info_call_error
|
1538 |
-
|
1539 |
-
@staticmethod
|
1540 |
-
def _get_signature_keys(obj):
|
1541 |
-
parameters = inspect.signature(obj.__init__).parameters
|
1542 |
-
required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
|
1543 |
-
optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})
|
1544 |
-
expected_modules = set(required_parameters.keys()) - {"self"}
|
1545 |
-
return expected_modules, optional_parameters
|
1546 |
-
|
1547 |
-
@property
|
1548 |
-
def components(self) -> Dict[str, Any]:
|
1549 |
-
r"""
|
1550 |
-
The `self.components` property can be useful to run different pipelines with the same weights and
|
1551 |
-
configurations without reallocating additional memory.
|
1552 |
-
|
1553 |
-
Returns (`dict`):
|
1554 |
-
A dictionary containing all the modules needed to initialize the pipeline.
|
1555 |
-
|
1556 |
-
Examples:
|
1557 |
-
|
1558 |
-
```py
|
1559 |
-
>>> from diffusers import (
|
1560 |
-
... StableDiffusionPipeline,
|
1561 |
-
... StableDiffusionImg2ImgPipeline,
|
1562 |
-
... StableDiffusionInpaintPipeline,
|
1563 |
-
... )
|
1564 |
-
|
1565 |
-
>>> text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
|
1566 |
-
>>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
|
1567 |
-
>>> inpaint = StableDiffusionInpaintPipeline(**text2img.components)
|
1568 |
-
```
|
1569 |
-
"""
|
1570 |
-
expected_modules, optional_parameters = self._get_signature_keys(self)
|
1571 |
-
components = {
|
1572 |
-
k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters
|
1573 |
-
}
|
1574 |
-
|
1575 |
-
if set(components.keys()) != expected_modules:
|
1576 |
-
raise ValueError(
|
1577 |
-
f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected"
|
1578 |
-
f" {expected_modules} to be defined, but {components.keys()} are defined."
|
1579 |
-
)
|
1580 |
-
|
1581 |
-
return components
|
1582 |
-
|
1583 |
-
@staticmethod
|
1584 |
-
def numpy_to_pil(images):
|
1585 |
-
"""
|
1586 |
-
Convert a NumPy image or a batch of images to a PIL image.
|
1587 |
-
"""
|
1588 |
-
return numpy_to_pil(images)
|
1589 |
-
|
1590 |
-
def progress_bar(self, iterable=None, total=None):
|
1591 |
-
if not hasattr(self, "_progress_bar_config"):
|
1592 |
-
self._progress_bar_config = {}
|
1593 |
-
elif not isinstance(self._progress_bar_config, dict):
|
1594 |
-
raise ValueError(
|
1595 |
-
f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
|
1596 |
-
)
|
1597 |
-
|
1598 |
-
if iterable is not None:
|
1599 |
-
return tqdm(iterable, **self._progress_bar_config)
|
1600 |
-
elif total is not None:
|
1601 |
-
return tqdm(total=total, **self._progress_bar_config)
|
1602 |
-
else:
|
1603 |
-
raise ValueError("Either `total` or `iterable` has to be defined.")
|
1604 |
-
|
1605 |
-
def set_progress_bar_config(self, **kwargs):
|
1606 |
-
self._progress_bar_config = kwargs
|
1607 |
-
|
1608 |
-
def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):
|
1609 |
-
r"""
|
1610 |
-
Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this
|
1611 |
-
option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed
|
1612 |
-
up during training is not guaranteed.
|
1613 |
-
|
1614 |
-
<Tip warning={true}>
|
1615 |
-
|
1616 |
-
⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes
|
1617 |
-
precedent.
|
1618 |
-
|
1619 |
-
</Tip>
|
1620 |
-
|
1621 |
-
Parameters:
|
1622 |
-
attention_op (`Callable`, *optional*):
|
1623 |
-
Override the default `None` operator for use as `op` argument to the
|
1624 |
-
[`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)
|
1625 |
-
function of xFormers.
|
1626 |
-
|
1627 |
-
Examples:
|
1628 |
-
|
1629 |
-
```py
|
1630 |
-
>>> import torch
|
1631 |
-
>>> from diffusers import DiffusionPipeline
|
1632 |
-
>>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
|
1633 |
-
|
1634 |
-
>>> pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16)
|
1635 |
-
>>> pipe = pipe.to("cuda")
|
1636 |
-
>>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
|
1637 |
-
>>> # Workaround for not accepting attention shape using VAE for Flash Attention
|
1638 |
-
>>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)
|
1639 |
-
```
|
1640 |
-
"""
|
1641 |
-
self.set_use_memory_efficient_attention_xformers(True, attention_op)
|
1642 |
-
|
1643 |
-
def disable_xformers_memory_efficient_attention(self):
|
1644 |
-
r"""
|
1645 |
-
Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).
|
1646 |
-
"""
|
1647 |
-
self.set_use_memory_efficient_attention_xformers(False)
|
1648 |
-
|
1649 |
-
def set_use_memory_efficient_attention_xformers(
|
1650 |
-
self, valid: bool, attention_op: Optional[Callable] = None
|
1651 |
-
) -> None:
|
1652 |
-
# Recursively walk through all the children.
|
1653 |
-
# Any children which exposes the set_use_memory_efficient_attention_xformers method
|
1654 |
-
# gets the message
|
1655 |
-
def fn_recursive_set_mem_eff(module: torch.nn.Module):
|
1656 |
-
if hasattr(module, "set_use_memory_efficient_attention_xformers"):
|
1657 |
-
module.set_use_memory_efficient_attention_xformers(valid, attention_op)
|
1658 |
-
|
1659 |
-
for child in module.children():
|
1660 |
-
fn_recursive_set_mem_eff(child)
|
1661 |
-
|
1662 |
-
module_names, _ = self._get_signature_keys(self)
|
1663 |
-
modules = [getattr(self, n, None) for n in module_names]
|
1664 |
-
modules = [m for m in modules if isinstance(m, torch.nn.Module)]
|
1665 |
-
|
1666 |
-
for module in modules:
|
1667 |
-
fn_recursive_set_mem_eff(module)
|
1668 |
-
|
1669 |
-
def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
|
1670 |
-
r"""
|
1671 |
-
Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor
|
1672 |
-
in slices to compute attention in several steps. This is useful to save some memory in exchange for a small
|
1673 |
-
speed decrease.
|
1674 |
-
|
1675 |
-
Args:
|
1676 |
-
slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
|
1677 |
-
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
|
1678 |
-
`"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
|
1679 |
-
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
|
1680 |
-
must be a multiple of `slice_size`.
|
1681 |
-
"""
|
1682 |
-
self.set_attention_slice(slice_size)
|
1683 |
-
|
1684 |
-
def disable_attention_slicing(self):
|
1685 |
-
r"""
|
1686 |
-
Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is
|
1687 |
-
computed in one step.
|
1688 |
-
"""
|
1689 |
-
# set slice_size = `None` to disable `attention slicing`
|
1690 |
-
self.enable_attention_slicing(None)
|
1691 |
-
|
1692 |
-
def set_attention_slice(self, slice_size: Optional[int]):
|
1693 |
-
module_names, _ = self._get_signature_keys(self)
|
1694 |
-
modules = [getattr(self, n, None) for n in module_names]
|
1695 |
-
modules = [m for m in modules if isinstance(m, torch.nn.Module) and hasattr(m, "set_attention_slice")]
|
1696 |
-
|
1697 |
-
for module in modules:
|
1698 |
-
module.set_attention_slice(slice_size)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/spectrogram_diffusion/__init__.py
DELETED
File without changes
|
spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/coco_detection.py
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
dataset_type = 'CocoDataset'
|
2 |
-
data_root = 'data/coco/'
|
3 |
-
img_norm_cfg = dict(
|
4 |
-
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
5 |
-
train_pipeline = [
|
6 |
-
dict(type='LoadImageFromFile'),
|
7 |
-
dict(type='LoadAnnotations', with_bbox=True),
|
8 |
-
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
|
9 |
-
dict(type='RandomFlip', flip_ratio=0.5),
|
10 |
-
dict(type='Normalize', **img_norm_cfg),
|
11 |
-
dict(type='Pad', size_divisor=32),
|
12 |
-
dict(type='DefaultFormatBundle'),
|
13 |
-
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
|
14 |
-
]
|
15 |
-
test_pipeline = [
|
16 |
-
dict(type='LoadImageFromFile'),
|
17 |
-
dict(
|
18 |
-
type='MultiScaleFlipAug',
|
19 |
-
img_scale=(1333, 800),
|
20 |
-
flip=False,
|
21 |
-
transforms=[
|
22 |
-
dict(type='Resize', keep_ratio=True),
|
23 |
-
dict(type='RandomFlip'),
|
24 |
-
dict(type='Normalize', **img_norm_cfg),
|
25 |
-
dict(type='Pad', size_divisor=32),
|
26 |
-
dict(type='ImageToTensor', keys=['img']),
|
27 |
-
dict(type='Collect', keys=['img']),
|
28 |
-
])
|
29 |
-
]
|
30 |
-
data = dict(
|
31 |
-
samples_per_gpu=2,
|
32 |
-
workers_per_gpu=2,
|
33 |
-
train=dict(
|
34 |
-
type=dataset_type,
|
35 |
-
ann_file=data_root + 'annotations/instances_train2017.json',
|
36 |
-
img_prefix=data_root + 'train2017/',
|
37 |
-
pipeline=train_pipeline),
|
38 |
-
val=dict(
|
39 |
-
type=dataset_type,
|
40 |
-
ann_file=data_root + 'annotations/instances_val2017.json',
|
41 |
-
img_prefix=data_root + 'val2017/',
|
42 |
-
pipeline=test_pipeline),
|
43 |
-
test=dict(
|
44 |
-
type=dataset_type,
|
45 |
-
ann_file=data_root + 'annotations/instances_val2017.json',
|
46 |
-
img_prefix=data_root + 'val2017/',
|
47 |
-
pipeline=test_pipeline))
|
48 |
-
evaluation = dict(interval=1, metric='bbox')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './ann_r50-d8_769x769_40k_cityscapes.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './psanet_r50-d8_512x512_160k_ade20k.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Artgor/digit-draw-detect/src/utils.py
DELETED
@@ -1,105 +0,0 @@
|
|
1 |
-
import datetime
|
2 |
-
import json
|
3 |
-
import os
|
4 |
-
import uuid
|
5 |
-
from typing import List
|
6 |
-
|
7 |
-
import boto3
|
8 |
-
import matplotlib
|
9 |
-
import matplotlib.patches as patches
|
10 |
-
import matplotlib.pyplot as plt
|
11 |
-
import numpy.typing as npt
|
12 |
-
import streamlit as st
|
13 |
-
import tomli
|
14 |
-
|
15 |
-
AWS_ACCESS_KEY_ID = ''
|
16 |
-
AWS_SECRET_ACCESS_KEY = ''
|
17 |
-
try:
|
18 |
-
if st.secrets is not None:
|
19 |
-
AWS_ACCESS_KEY_ID = st.secrets['AWS_ACCESS_KEY_ID']
|
20 |
-
AWS_SECRET_ACCESS_KEY = st.secrets['AWS_SECRET_ACCESS_KEY']
|
21 |
-
except BaseException:
|
22 |
-
pass
|
23 |
-
|
24 |
-
if os.path.exists('config.toml'):
|
25 |
-
with open('config.toml', 'rb') as f:
|
26 |
-
config = tomli.load(f)
|
27 |
-
AWS_ACCESS_KEY_ID = config['AWS_ACCESS_KEY_ID']
|
28 |
-
AWS_SECRET_ACCESS_KEY = config['AWS_SECRET_ACCESS_KEY']
|
29 |
-
|
30 |
-
client = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
|
31 |
-
|
32 |
-
|
33 |
-
def plot_img_with_rects(
|
34 |
-
img: npt.ArrayLike, boxes: List[List], threshold: float = 0.5, coef: int = 400
|
35 |
-
) -> matplotlib.figure.Figure:
|
36 |
-
"""
|
37 |
-
Plot image with rectangles.
|
38 |
-
|
39 |
-
Args:
|
40 |
-
img: image as a numpy array
|
41 |
-
boxes: the list of the bboxes
|
42 |
-
threshold: threshold for bbox probability
|
43 |
-
coef: coefficient to multiply images. Can be changed when the original image is a different size
|
44 |
-
|
45 |
-
Returns:
|
46 |
-
image with bboxes
|
47 |
-
"""
|
48 |
-
fig, ax = plt.subplots(1, figsize=(4, 4))
|
49 |
-
|
50 |
-
# Display the image
|
51 |
-
ax.imshow(img)
|
52 |
-
|
53 |
-
# Create a Rectangle patch
|
54 |
-
for _, rect in enumerate(b for b in boxes if b[1] > threshold):
|
55 |
-
label, _, xc, yc, w, h = rect
|
56 |
-
xc, yc, w, h = xc * coef, yc * coef, w * coef, h * coef
|
57 |
-
# the coordinates from center-based to left top corner
|
58 |
-
x = xc - w / 2
|
59 |
-
y = yc - h / 2
|
60 |
-
label = int(label)
|
61 |
-
label = label if label != 10 else 'censored'
|
62 |
-
label = label if label != 11 else 'other'
|
63 |
-
rect = [x, y, x + w, y + h]
|
64 |
-
|
65 |
-
rect_ = patches.Rectangle(
|
66 |
-
(rect[0], rect[1]), rect[2] - rect[0], rect[3] - rect[1], linewidth=2, edgecolor='blue', facecolor='none'
|
67 |
-
)
|
68 |
-
plt.text(rect[2], rect[1], f'{label}', color='blue')
|
69 |
-
# Add the patch to the Axes
|
70 |
-
ax.add_patch(rect_)
|
71 |
-
return fig
|
72 |
-
|
73 |
-
|
74 |
-
def save_object_to_s3(filename, s3_filename):
|
75 |
-
client.upload_file(filename, 'digitdrawdetect', s3_filename)
|
76 |
-
|
77 |
-
|
78 |
-
@st.cache_data(show_spinner=False)
|
79 |
-
def save_image(image: npt.ArrayLike, pred: List[List]) -> str:
|
80 |
-
"""
|
81 |
-
Save the image and upload the image with bboxes to s3.
|
82 |
-
|
83 |
-
Args:
|
84 |
-
image: np.array with image
|
85 |
-
pred: bboxes
|
86 |
-
|
87 |
-
Returns:
|
88 |
-
image name
|
89 |
-
|
90 |
-
"""
|
91 |
-
# create a figure and save it
|
92 |
-
fig, ax = plt.subplots(1, figsize=(4, 4))
|
93 |
-
ax.imshow(image)
|
94 |
-
file_name = str(datetime.datetime.today().date()) + str(uuid.uuid1())
|
95 |
-
fig.savefig(f'{file_name}.png')
|
96 |
-
|
97 |
-
# dump bboxes in a local file
|
98 |
-
with open(f'{file_name}.json', 'w') as j_f:
|
99 |
-
json.dump({f'{file_name}.png': pred}, j_f)
|
100 |
-
|
101 |
-
# upload the image and the bboxes to s3.
|
102 |
-
save_object_to_s3(f'{file_name}.png', f'images/{file_name}.png')
|
103 |
-
save_object_to_s3(f'{file_name}.json', f'labels/{file_name}.json')
|
104 |
-
|
105 |
-
return file_name
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Artrajz/vits-simple-api/utils/nlp.py
DELETED
@@ -1,97 +0,0 @@
|
|
1 |
-
import regex as re
|
2 |
-
import config
|
3 |
-
from .utils import check_is_none
|
4 |
-
from logger import logger
|
5 |
-
|
6 |
-
# 读取配置选择语种识别库
|
7 |
-
clf = getattr(config, "LANGUAGE_IDENTIFICATION_LIBRARY", "fastlid")
|
8 |
-
|
9 |
-
|
10 |
-
def clasify_lang(text, speaker_lang):
|
11 |
-
pattern = r'[\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\>\=\?\@\[\]\{\}\\\\\^\_\`' \
|
12 |
-
r'\!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」' \
|
13 |
-
r'『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘\'\‛\“\”\„\‟…‧﹏.]+'
|
14 |
-
words = re.split(pattern, text)
|
15 |
-
|
16 |
-
pre = ""
|
17 |
-
p = 0
|
18 |
-
|
19 |
-
if clf.upper() == "FASTLID" or clf.upper() == "FASTTEXT":
|
20 |
-
from fastlid import fastlid
|
21 |
-
detect = fastlid
|
22 |
-
if speaker_lang != None: fastlid.set_languages = speaker_lang
|
23 |
-
elif clf.upper() == "LANGID":
|
24 |
-
import langid
|
25 |
-
detect = langid.classify
|
26 |
-
if speaker_lang != None: langid.set_languages(speaker_lang)
|
27 |
-
else:
|
28 |
-
raise ValueError(f"Wrong LANGUAGE_IDENTIFICATION_LIBRARY in config.py")
|
29 |
-
|
30 |
-
for word in words:
|
31 |
-
|
32 |
-
if check_is_none(word): continue
|
33 |
-
|
34 |
-
lang = detect(word)[0]
|
35 |
-
|
36 |
-
if pre == "":
|
37 |
-
text = text[:p] + text[p:].replace(word, f'[{lang.upper()}]' + word, 1)
|
38 |
-
p += len(f'[{lang.upper()}]')
|
39 |
-
elif pre != lang:
|
40 |
-
text = text[:p] + text[p:].replace(word, f'[{pre.upper()}][{lang.upper()}]' + word, 1)
|
41 |
-
p += len(f'[{pre.upper()}][{lang.upper()}]')
|
42 |
-
pre = lang
|
43 |
-
p += text[p:].index(word) + len(word)
|
44 |
-
text += f"[{pre.upper()}]"
|
45 |
-
|
46 |
-
return text
|
47 |
-
|
48 |
-
|
49 |
-
def cut(text, max):
|
50 |
-
pattern = r'[!(),—+\-.:;??。,、;:]+'
|
51 |
-
sentences = re.split(pattern, text)
|
52 |
-
discarded_chars = re.findall(pattern, text)
|
53 |
-
|
54 |
-
sentence_list, count, p = [], 0, 0
|
55 |
-
|
56 |
-
# 按被分割的符号遍历
|
57 |
-
for i, discarded_chars in enumerate(discarded_chars):
|
58 |
-
count += len(sentences[i]) + len(discarded_chars)
|
59 |
-
if count >= max:
|
60 |
-
sentence_list.append(text[p:p + count].strip())
|
61 |
-
p += count
|
62 |
-
count = 0
|
63 |
-
|
64 |
-
# 加入最后剩余的文本
|
65 |
-
if p < len(text):
|
66 |
-
sentence_list.append(text[p:])
|
67 |
-
|
68 |
-
return sentence_list
|
69 |
-
|
70 |
-
|
71 |
-
def sentence_split(text, max=50, lang="auto", speaker_lang=None):
|
72 |
-
# 如果该speaker只支持一种语言
|
73 |
-
if speaker_lang is not None and len(speaker_lang) == 1:
|
74 |
-
if lang.upper() not in ["AUTO", "MIX"] and lang.lower() != speaker_lang[0]:
|
75 |
-
logger.debug(
|
76 |
-
f"lang \"{lang}\" is not in speaker_lang {speaker_lang},automatically set lang={speaker_lang[0]}")
|
77 |
-
lang = speaker_lang[0]
|
78 |
-
|
79 |
-
sentence_list = []
|
80 |
-
if lang.upper() != "MIX":
|
81 |
-
if max <= 0:
|
82 |
-
sentence_list.append(
|
83 |
-
clasify_lang(text,
|
84 |
-
speaker_lang) if lang.upper() == "AUTO" else f"[{lang.upper()}]{text}[{lang.upper()}]")
|
85 |
-
else:
|
86 |
-
for i in cut(text, max):
|
87 |
-
if check_is_none(i): continue
|
88 |
-
sentence_list.append(
|
89 |
-
clasify_lang(i,
|
90 |
-
speaker_lang) if lang.upper() == "AUTO" else f"[{lang.upper()}]{i}[{lang.upper()}]")
|
91 |
-
else:
|
92 |
-
sentence_list.append(text)
|
93 |
-
|
94 |
-
for i in sentence_list:
|
95 |
-
logger.debug(i)
|
96 |
-
|
97 |
-
return sentence_list
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ashrafb/Tesseract-OCR/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Tesseract OCR
|
3 |
-
emoji: 🐢
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.40.1
|
8 |
-
app_file: app_blocks.py
|
9 |
-
pinned: false
|
10 |
-
duplicated_from: kneelesh48/Tesseract-OCR
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/enums.py
DELETED
@@ -1,85 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
All of the Enums that are used throughout the chardet package.
|
3 |
-
|
4 |
-
:author: Dan Blanchard ([email protected])
|
5 |
-
"""
|
6 |
-
|
7 |
-
from enum import Enum, Flag
|
8 |
-
|
9 |
-
|
10 |
-
class InputState:
|
11 |
-
"""
|
12 |
-
This enum represents the different states a universal detector can be in.
|
13 |
-
"""
|
14 |
-
|
15 |
-
PURE_ASCII = 0
|
16 |
-
ESC_ASCII = 1
|
17 |
-
HIGH_BYTE = 2
|
18 |
-
|
19 |
-
|
20 |
-
class LanguageFilter(Flag):
|
21 |
-
"""
|
22 |
-
This enum represents the different language filters we can apply to a
|
23 |
-
``UniversalDetector``.
|
24 |
-
"""
|
25 |
-
|
26 |
-
NONE = 0x00
|
27 |
-
CHINESE_SIMPLIFIED = 0x01
|
28 |
-
CHINESE_TRADITIONAL = 0x02
|
29 |
-
JAPANESE = 0x04
|
30 |
-
KOREAN = 0x08
|
31 |
-
NON_CJK = 0x10
|
32 |
-
ALL = 0x1F
|
33 |
-
CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL
|
34 |
-
CJK = CHINESE | JAPANESE | KOREAN
|
35 |
-
|
36 |
-
|
37 |
-
class ProbingState(Enum):
|
38 |
-
"""
|
39 |
-
This enum represents the different states a prober can be in.
|
40 |
-
"""
|
41 |
-
|
42 |
-
DETECTING = 0
|
43 |
-
FOUND_IT = 1
|
44 |
-
NOT_ME = 2
|
45 |
-
|
46 |
-
|
47 |
-
class MachineState:
|
48 |
-
"""
|
49 |
-
This enum represents the different states a state machine can be in.
|
50 |
-
"""
|
51 |
-
|
52 |
-
START = 0
|
53 |
-
ERROR = 1
|
54 |
-
ITS_ME = 2
|
55 |
-
|
56 |
-
|
57 |
-
class SequenceLikelihood:
|
58 |
-
"""
|
59 |
-
This enum represents the likelihood of a character following the previous one.
|
60 |
-
"""
|
61 |
-
|
62 |
-
NEGATIVE = 0
|
63 |
-
UNLIKELY = 1
|
64 |
-
LIKELY = 2
|
65 |
-
POSITIVE = 3
|
66 |
-
|
67 |
-
@classmethod
|
68 |
-
def get_num_categories(cls) -> int:
|
69 |
-
""":returns: The number of likelihood categories in the enum."""
|
70 |
-
return 4
|
71 |
-
|
72 |
-
|
73 |
-
class CharacterCategory:
|
74 |
-
"""
|
75 |
-
This enum represents the different categories language models for
|
76 |
-
``SingleByteCharsetProber`` put characters into.
|
77 |
-
|
78 |
-
Anything less than CONTROL is considered a letter.
|
79 |
-
"""
|
80 |
-
|
81 |
-
UNDEFINED = 255
|
82 |
-
LINE_BREAK = 254
|
83 |
-
SYMBOL = 253
|
84 |
-
DIGIT = 252
|
85 |
-
CONTROL = 251
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/mbcsgroupprober.py
DELETED
@@ -1,57 +0,0 @@
|
|
1 |
-
######################## BEGIN LICENSE BLOCK ########################
|
2 |
-
# The Original Code is Mozilla Universal charset detector code.
|
3 |
-
#
|
4 |
-
# The Initial Developer of the Original Code is
|
5 |
-
# Netscape Communications Corporation.
|
6 |
-
# Portions created by the Initial Developer are Copyright (C) 2001
|
7 |
-
# the Initial Developer. All Rights Reserved.
|
8 |
-
#
|
9 |
-
# Contributor(s):
|
10 |
-
# Mark Pilgrim - port to Python
|
11 |
-
# Shy Shalom - original C code
|
12 |
-
# Proofpoint, Inc.
|
13 |
-
#
|
14 |
-
# This library is free software; you can redistribute it and/or
|
15 |
-
# modify it under the terms of the GNU Lesser General Public
|
16 |
-
# License as published by the Free Software Foundation; either
|
17 |
-
# version 2.1 of the License, or (at your option) any later version.
|
18 |
-
#
|
19 |
-
# This library is distributed in the hope that it will be useful,
|
20 |
-
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
21 |
-
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
22 |
-
# Lesser General Public License for more details.
|
23 |
-
#
|
24 |
-
# You should have received a copy of the GNU Lesser General Public
|
25 |
-
# License along with this library; if not, write to the Free Software
|
26 |
-
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
|
27 |
-
# 02110-1301 USA
|
28 |
-
######################### END LICENSE BLOCK #########################
|
29 |
-
|
30 |
-
from .big5prober import Big5Prober
|
31 |
-
from .charsetgroupprober import CharSetGroupProber
|
32 |
-
from .cp949prober import CP949Prober
|
33 |
-
from .enums import LanguageFilter
|
34 |
-
from .eucjpprober import EUCJPProber
|
35 |
-
from .euckrprober import EUCKRProber
|
36 |
-
from .euctwprober import EUCTWProber
|
37 |
-
from .gb2312prober import GB2312Prober
|
38 |
-
from .johabprober import JOHABProber
|
39 |
-
from .sjisprober import SJISProber
|
40 |
-
from .utf8prober import UTF8Prober
|
41 |
-
|
42 |
-
|
43 |
-
class MBCSGroupProber(CharSetGroupProber):
|
44 |
-
def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
|
45 |
-
super().__init__(lang_filter=lang_filter)
|
46 |
-
self.probers = [
|
47 |
-
UTF8Prober(),
|
48 |
-
SJISProber(),
|
49 |
-
EUCJPProber(),
|
50 |
-
GB2312Prober(),
|
51 |
-
EUCKRProber(),
|
52 |
-
CP949Prober(),
|
53 |
-
Big5Prober(),
|
54 |
-
EUCTWProber(),
|
55 |
-
JOHABProber(),
|
56 |
-
]
|
57 |
-
self.reset()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distro/__init__.py
DELETED
@@ -1,54 +0,0 @@
|
|
1 |
-
from .distro import (
|
2 |
-
NORMALIZED_DISTRO_ID,
|
3 |
-
NORMALIZED_LSB_ID,
|
4 |
-
NORMALIZED_OS_ID,
|
5 |
-
LinuxDistribution,
|
6 |
-
__version__,
|
7 |
-
build_number,
|
8 |
-
codename,
|
9 |
-
distro_release_attr,
|
10 |
-
distro_release_info,
|
11 |
-
id,
|
12 |
-
info,
|
13 |
-
like,
|
14 |
-
linux_distribution,
|
15 |
-
lsb_release_attr,
|
16 |
-
lsb_release_info,
|
17 |
-
major_version,
|
18 |
-
minor_version,
|
19 |
-
name,
|
20 |
-
os_release_attr,
|
21 |
-
os_release_info,
|
22 |
-
uname_attr,
|
23 |
-
uname_info,
|
24 |
-
version,
|
25 |
-
version_parts,
|
26 |
-
)
|
27 |
-
|
28 |
-
__all__ = [
|
29 |
-
"NORMALIZED_DISTRO_ID",
|
30 |
-
"NORMALIZED_LSB_ID",
|
31 |
-
"NORMALIZED_OS_ID",
|
32 |
-
"LinuxDistribution",
|
33 |
-
"build_number",
|
34 |
-
"codename",
|
35 |
-
"distro_release_attr",
|
36 |
-
"distro_release_info",
|
37 |
-
"id",
|
38 |
-
"info",
|
39 |
-
"like",
|
40 |
-
"linux_distribution",
|
41 |
-
"lsb_release_attr",
|
42 |
-
"lsb_release_info",
|
43 |
-
"major_version",
|
44 |
-
"minor_version",
|
45 |
-
"name",
|
46 |
-
"os_release_attr",
|
47 |
-
"os_release_info",
|
48 |
-
"uname_attr",
|
49 |
-
"uname_info",
|
50 |
-
"version",
|
51 |
-
"version_parts",
|
52 |
-
]
|
53 |
-
|
54 |
-
__version__ = __version__
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/Applio-RVC-Fork/utils/README.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
# External Colab Code
|
2 |
-
Code used to make Google Colab work correctly
|
3 |
-
- Repo link: https://github.com/IAHispano/Applio-RVC-Fork/
|
4 |
-
|
5 |
-
Thanks to https://github.com/kalomaze/externalcolabcode
|
6 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/60 Segundos Reatomized Apk Descargar Gratis Android.md
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>60 segundos reatomized APK: Cómo descargar y jugar en Android</h1>
|
3 |
-
<p>Si usted está buscando un juego de supervivencia divertido y desafiante que pondrá a prueba sus habilidades y la toma de decisiones, es posible que desee echa un vistazo 60 Segundos Reatomized. Esta es una versión remasterizada del juego original de 60 segundos, que fue lanzado en 2015. En este juego, tienes que buscar provisiones, rescatar a tu familia y permanecer vivo en tu refugio radioactivo después de un ataque nuclear. El juego cuenta con gráficos mejorados, nuevo contenido y más formas de escapar del páramo. Pero, ¿cómo puedes jugar a este juego en tu dispositivo Android? En este artículo, le mostraremos cómo descargar e instalar el archivo APK reatomizado 60 segundos, y cómo jugar el juego en su teléfono o tableta. </p>
|
4 |
-
<h2>60 segundos reatomized apk descargar gratis android</h2><br /><p><b><b>Download Zip</b> ———>>> <a href="https://bltlly.com/2v6J0E">https://bltlly.com/2v6J0E</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es 60 segundos Reatomized? </h2>
|
6 |
-
<p>60 Seconds Reatomized es un juego de comedia oscura post-apocalíptica desarrollado por Robot Gentleman. El juego se divide en dos fases: carroña y supervivencia. En la fase de búsqueda, tienes 60 segundos para agarrar tantos artículos y miembros de la familia como puedas de tu casa antes de que las bombas caigan. Tienes que ser rápido e inteligente, ya que todo estará en tu contra: el tiempo, tus muebles y un diseño de casa generado al azar. En la fase de supervivencia, tienes que manejar tus recursos, lidiar con eventos inesperados y tomar decisiones difíciles en tu refugio contra las consecuencias. También puede aventurarse en el páramo para buscar más suministros u oportunidades para escapar. El juego tiene múltiples finales dependiendo de tus acciones y suerte. </p>
|
7 |
-
<p>60 Seconds Reatomized tiene varias características que lo hacen diferente del juego original. Estas incluyen:</p>
|
8 |
-
<ul>
|
9 |
-
<li>Nuevo modo de juego: Desafíos de supervivencia. Estas son historias cortas que pondrán a prueba tus habilidades de supervivencia. </li>
|
10 |
-
<li>Nuevas oportunidades para escapar de la tierra baldía en forma de una historia que abarca múltiples partidas. </li>
|
11 |
-
<li>Nuevo sistema de relaciones: más historias e interacciones locas entre los miembros de la familia McDoodle. </li>
|
12 |
-
|
13 |
-
<li>Nuevos logros: ponte a prueba y demuestra tus habilidades. </li>
|
14 |
-
</ul>
|
15 |
-
<h2> Cómo descargar 60 segundos reatomized APK para Android</h2>
|
16 |
-
<p>Desafortunadamente, 60 segundos Reatomized no está disponible en Google Play Store. Sin embargo, todavía puede descargar e instalar el archivo APK desde otras fuentes. Un archivo APK es un paquete que contiene todos los archivos necesarios para ejecutar una aplicación Android. Sin embargo, usted tiene que tener cuidado acerca de dónde descargar archivos APK de, como algunos sitios pueden contener malware o virus. Solo descarga archivos APK de fuentes confiables que monitorean los archivos que alojan. </p>
|
17 |
-
<p>Uno de los sitios más populares para descargar archivos APK es APK Mirror. Este sitio alberga un montón de aplicaciones de Android que se pueden instalar individualmente o como actualizaciones. También verifica los archivos que aloja para asegurarse de que son seguros y auténticos. Aquí están los pasos para descargar 60 Segundos reatomized APK de APK Mirror:</p>
|
18 |
-
<ol>
|
19 |
-
<li>Ir a <a href="( 1 )">APK Mirror</a> en el navegador de su dispositivo Android. </li>
|
20 |
-
<li>Buscar "60 Seconds Reatomized" en la barra de búsqueda. </li>
|
21 |
-
<li>Seleccione la última versión de la aplicación de la lista de resultados. </li>
|
22 |
-
<li>Desplácese hacia abajo y toque en "Descargar APK" botón. </li>
|
23 |
-
<li>Aceptar cualquier ventana emergente o permisos que puedan aparecer. </li>
|
24 |
-
<li>Espera a que termine la descarga. </li>
|
25 |
-
</ol>
|
26 |
-
<p>Antes de poder instalar el archivo APK, debe habilitar fuentes desconocidas en su dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store. Para hacer esto, siga estos pasos:</p>
|
27 |
-
<p></p>
|
28 |
-
<ol>
|
29 |
-
<li>Ir a la configuración de su dispositivo y toque en "Seguridad". </li>
|
30 |
-
<li>Encuentra la opción que dice "Fuentes desconocidas" y cámbiala. </li>
|
31 |
-
<li>Confirme cualquier advertencia que pueda aparecer. </li>
|
32 |
-
</ol>
|
33 |
-
<p>Ahora está listo para instalar el archivo APK. Para hacer esto, siga estos pasos:</p>
|
34 |
-
<ol>
|
35 |
-
<li>Ir al administrador de archivos de su dispositivo y localizar el archivo APK descargado. </li>
|
36 |
-
<li>Toque en el archivo y seleccione "Instalar". </li>
|
37 |
-
<li>Espere a que termine la instalación. </li>
|
38 |
-
|
39 |
-
</ol>
|
40 |
-
<p>Felicidades! Usted ha descargado e instalado con éxito 60 Segundos reatomized APK en su dispositivo Android. Ahora puedes disfrutar del juego y sus características. </p>
|
41 |
-
<h2>Cómo jugar 60 segundos Reatomized en Android</h2>
|
42 |
-
<p>60 Seconds Reatomized es un juego que desafiará tus habilidades de supervivencia y toma de decisiones. El juego tiene cuatro modos diferentes: Atomic Drill, Apocalypse, Scavenge y Survival. Aquí hay un breve resumen de cada modo y algunos consejos sobre cómo jugarlos:</p>
|
43 |
-
<h3>Taladro atómico</h3>
|
44 |
-
<p>Este es el modo tutorial del juego. Te enseñará lo básico de la fase de búsqueda, como cómo mover, agarrar objetos y dejarlos en el refugio. También puede practicar sus habilidades en diferentes escenarios y diseños de casas. Este modo se recomienda para principiantes que quieren aprender las cuerdas antes de saltar a la acción real. </p>
|
45 |
-
<h3>Apocalipsis</h3>
|
46 |
-
<p>Este es el modo principal del juego. Combina las fases de búsqueda y supervivencia. Tienes que buscar provisiones y miembros de la familia en 60 segundos, luego administrar tu refugio radioactivo durante el mayor tiempo posible. Puedes elegir entre tres niveles de dificultad: Little Boy, Fat Man y Tsar Bomba. Cuanto mayor sea la dificultad, más difícil será encontrar objetos útiles, lidiar con los eventos y escapar del páramo. Este modo se recomienda para los jugadores que quieren experimentar la historia completa y el desafío del juego. </p>
|
47 |
-
<h3>Carroña</h3>
|
48 |
-
<p>Este es un modo que se centra solo en la fase de búsqueda. Usted puede elegir entre diferentes escenarios y diseños de la casa, y tratar de agarrar tantos elementos y miembros de la familia como sea posible en 60 segundos. También puede personalizar su propio escenario eligiendo los elementos, los miembros de la familia y el diseño de la casa. Este modo se recomienda para jugadores que quieran practicar sus habilidades de búsqueda o divertirse con diferentes combinaciones. </p>
|
49 |
-
<h3>Supervivencia</h3>
|
50 |
-
|
51 |
-
<h4> Consejos sobre cómo jugar 60 segundos reatomized en Android</h4>
|
52 |
-
<p>Aquí hay algunos consejos generales que le ayudarán a jugar 60 segundos reatomized en Android:</p>
|
53 |
-
<ul>
|
54 |
-
<li>Planifique con anticipación: Antes de empezar a buscar basura, eche un vistazo al diseño de su casa y decida qué artículos y miembros de la familia desea agarrar. Priorice alimentos, agua, radio, botiquín, máscara de gas, mapa, hacha, rifle, maleta y miembros de la familia. </li>
|
55 |
-
<li>Sé rápido: solo tienes 60 segundos para buscar, así que no pierdas tiempo en acciones o elementos innecesarios. Usa ambas manos para agarrar objetos más rápido y déjalos cerca de la entrada del refugio para facilitar el acceso. </li>
|
56 |
-
<li>Sé inteligente: Tienes que tomar decisiones difíciles en ambas fases del juego. Piense cuidadosamente sobre qué artículos necesita, a qué eventos quiere responder, qué riesgos quiere tomar y qué consecuencias está dispuesto a enfrentar. </li>
|
57 |
-
<li>Sé flexible: El juego es impredecible y aleatorio. Nunca se sabe lo que sucederá a continuación o qué elementos se encuentran. Esté preparado para adaptarse a diferentes situaciones y resultados. </li>
|
58 |
-
<li>Diviértete: El juego está destinado a ser una comedia oscura que se burla de lo absurdo de la guerra nuclear. No te lo tomes demasiado en serio ni te frustres si las cosas salen mal. Disfruta del humor, las referencias y las sorpresas que ofrece el juego. </li>
|
59 |
-
</ul>
|
60 |
-
<h2>Conclusión</h2>
|
61 |
-
|
62 |
-
<h2>Preguntas frecuentes</h2>
|
63 |
-
<p>Aquí están algunas de las preguntas y respuestas más frecuentes sobre 60 segundos Reatomized:</p>
|
64 |
-
<h3>¿Son 60 segundos tratados libremente? </h3>
|
65 |
-
<p>No, 60 Segundos Reatomized no es un juego gratuito. Es un juego de pago que cuesta $3.99 en Steam y $1.99 en APK Mirror. Sin embargo, puedes descargar el archivo APK gratis de APK Mirror si quieres probar el juego en tu dispositivo Android. </p>
|
66 |
-
<h3>¿Es seguro el tratamiento de 60 segundos? </h3>
|
67 |
-
<p>Sí, 60 segundos Reatomized es seguro para jugar en su dispositivo Android. El archivo APK de APK Mirror es verificado y auténtico, y no contiene ningún malware o virus. Sin embargo, siempre debes tener cuidado con la descarga de archivos APK de otras fuentes, ya que pueden ser dañinos o falsos. </p>
|
68 |
-
<h3>¿Es el multijugador reatomizado 60 segundos? </h3>
|
69 |
-
<p>No, 60 segundos Reatomized no es un juego multijugador. Es un juego para un solo jugador que solo se puede jugar sin conexión. Sin embargo, puedes compartir tus logros y capturas de pantalla con tus amigos en línea. </p>
|
70 |
-
<h3>¿Es 60 segundos Reatomized compatible con mi dispositivo? </h3>
|
71 |
-
<p>60 segundos Reatomized requiere Android 4.1 o superior para ejecutarse en su dispositivo. También requiere al menos 1 GB de RAM y 500 MB de espacio de almacenamiento. Puede comprobar las especificaciones de su dispositivo en el menú de configuración. </p>
|
72 |
-
<h3>¿Cómo puedo contactar a los desarrolladores de 60 Seconds Reatomized? </h3>
|
73 |
-
<p>Si tienes preguntas, comentarios o problemas con el juego, puedes contactar a los desarrolladores de 60 Seconds Reatomized enviándolos un correo electrónico a [email protected]. También puede visitar su sitio web en <a href="">Robot Gentleman</a> o seguirlos en plataformas de redes sociales como Facebook, Twitter, Instagram y YouTube.</p> 64aa2da5cf<br />
|
74 |
-
<br />
|
75 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar El Juego De Ftbol Apk.md
DELETED
@@ -1,79 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Vive le Football: Un juego gratuito de gestión de fútbol móvil para Android</h1>
|
3 |
-
<p>Si eres un fanático del fútbol (o del fútbol, como algunos lo llaman), quizás te interese probar un nuevo juego móvil que te permita administrar tu propio club y competir con otros jugadores en línea. El juego se llama Vive le Football, y está desarrollado por NetEase, una empresa china que también creó juegos populares como Rules of Survival and Identity V. En este artículo, te diremos qué es Vive le Football, cómo descargarlo e instalarlo en tu dispositivo Android, por qué usted debe jugar, y algunos consejos y trucos para ayudarle a tener éxito en el juego. </p>
|
4 |
-
<h2>Descargar el juego de fútbol apk</h2><br /><p><b><b>Download File</b> ⚙⚙⚙ <a href="https://bltlly.com/2v6KNF">https://bltlly.com/2v6KNF</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Vive le Football? </h2>
|
6 |
-
<p>Vive le Football es un juego de gestión de fútbol móvil gratuito que fue lanzado en junio de 2021. El juego le permite crear su propio club, personalizar sus jugadores, estadio, logotipo y kits, y competir con otros clubes en varios modos. También puedes participar en torneos, ligas, copas y partidos amistosos con otros jugadores de todo el mundo. El juego cuenta con gráficos realistas, física y animaciones, así como un sistema de clima dinámico que afecta el juego. También puedes chatear con otros jugadores y unirte a clubes para cooperar y socializar. </p>
|
7 |
-
<h3>Características de Vive le Football</h3>
|
8 |
-
<p>Algunas de las características principales de Vive le Football son:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Puede elegir entre más de 100 clubes con licencia de diferentes países y regiones, o crear su propio club desde cero. </li>
|
11 |
-
<li>Puedes personalizar la apariencia, habilidades, atributos, posiciones y tácticas de tus jugadores. </li>
|
12 |
-
<li>Puede actualizar su estadio, instalaciones, personal y equipos para mejorar el rendimiento y los ingresos de su club. </li>
|
13 |
-
<li>Puedes jugar en varios modos, como el modo carrera, donde empiezas desde abajo y trabajas hasta arriba; modo desafío, donde te enfrentas a diferentes escenarios y objetivos; y modo online, donde compites con otros jugadores en partidos en tiempo real. </li>
|
14 |
-
|
15 |
-
<li>Puedes disfrutar de gráficos realistas, física y animaciones que hacen que el juego sea más inmersivo y divertido. También puede experimentar diferentes condiciones climáticas, como lluvia, nieve, niebla y viento. </li>
|
16 |
-
</ul>
|
17 |
-
<h3> Cómo descargar e instalar Vive le Football APK en Android</h3>
|
18 |
-
<p>Si quieres jugar Vive le Football en tu dispositivo Android, tendrás que descargar e instalar el archivo APK del juego. Un archivo APK es un archivo de paquete que contiene los archivos de instalación de una aplicación Android. Puede descargar el archivo APK de Vive le Football de varias fuentes en línea, como Filehippo.com. Sin embargo, antes de instalar el archivo APK, tendrá que habilitar la opción de instalar aplicaciones de fuentes desconocidas en su dispositivo. Para hacer esto, siga estos pasos:</p>
|
19 |
-
<p></p>
|
20 |
-
<ol>
|
21 |
-
<li>Ir a Configuración > Seguridad > Fuentes desconocidas y activarlo. </li>
|
22 |
-
<li>Vaya a la ubicación donde descargó el archivo APK de Vive le Football y toque en él. </li>
|
23 |
-
<li>Siga las instrucciones en la pantalla para instalar la aplicación. </li>
|
24 |
-
<li>Una vez completada la instalación, puede iniciar la aplicación desde el cajón de la aplicación o la pantalla de inicio. </li>
|
25 |
-
</ol>
|
26 |
-
<p>Nota: Instalar aplicaciones de fuentes desconocidas puede plantear algunos riesgos para la seguridad y el rendimiento de su dispositivo. Asegúrate de descargar solo archivos APK de fuentes confiables y escanearlos en busca de virus o malware antes de instalarlos. </p>
|
27 |
-
<h2>¿Por qué jugar Vive le Football? </h2>
|
28 |
-
<p>Vive le Football es un juego que atraerá a los aficionados al fútbol que quieren experimentar la emoción de administrar su propio club y jugar contra otros jugadores en línea. El juego ofrece muchas características y opciones que lo hacen divertido y atractivo. Estas son algunas razones por las que deberías jugar a Vive le Football:</p>
|
29 |
-
<h3>Pros y contras de Vive le Football</h3>
|
30 |
-
<p>Como cualquier otro juego, Vive le Football tiene sus pros y sus contras. Aquí están algunos de ellos:</p>
|
31 |
-
<tabla>
|
32 |
-
<tr>
|
33 |
-
<th>Pros</th>
|
34 |
-
<th>Contras</th>
|
35 |
-
</tr>
|
36 |
-
<tr>
|
37 |
-
<td>Puedes crear y personalizar tu propio club y jugadores. </td>
|
38 |
-
|
39 |
-
</tr>
|
40 |
-
<tr>
|
41 |
-
<td>Puedes jugar en varios modos y competir con otros jugadores en línea. </td>
|
42 |
-
<td>Usted puede encontrar algunos errores y fallos en el juego. </td>
|
43 |
-
</tr>
|
44 |
-
<tr>
|
45 |
-
<td>Puedes disfrutar de gráficos realistas, física y animaciones. </td>
|
46 |
-
<td>Es posible que necesite un dispositivo de alta gama para ejecutar el juego sin problemas. </td>
|
47 |
-
</tr>
|
48 |
-
<tr>
|
49 |
-
<td>Puedes chatear con otros jugadores y unirte a clubes para cooperar y socializar. </td>
|
50 |
-
<td>Usted puede encontrar algunos jugadores tóxicos o groseros en el chat. </td>
|
51 |
-
</tr>
|
52 |
-
</tabla>
|
53 |
-
<h3>Consejos y trucos para jugar Vive le Football</h3>
|
54 |
-
<p>Si quieres mejorar tus habilidades y rendimiento en Vive le Football, aquí tienes algunos consejos y trucos que puedes utilizar:</p>
|
55 |
-
<ul>
|
56 |
-
<li>Elija un club que se adapte a su estilo de juego y preferencias. Puede seleccionar entre más de 100 clubes con licencia o crear su propio club desde cero. Cada club tiene sus propias fortalezas y debilidades, así que elige sabiamente. </li>
|
57 |
-
<li>Actualice su estadio, instalaciones, personal y equipo con regularidad. Esto le ayudará a aumentar el rendimiento y los ingresos de su club. También puede desbloquear nuevas características y elementos mediante la actualización de su nivel de club. </li>
|
58 |
-
<li>Entrena a tus jugadores y ajusta sus habilidades, atributos, posiciones y tácticas. Puedes personalizar la apariencia, habilidades, atributos, posiciones y tácticas de tus jugadores según tu estrategia. También puedes entrenar a tus jugadores para mejorar sus habilidades y potencial. </li>
|
59 |
-
<li>Juega en diferentes modos y desafíos para ganar recompensas y experiencia. Puedes jugar en modo carrera, modo desafío, modo online, torneos, ligas, copas y partidos amistosos. Cada modo tiene sus propios objetivos y recompensas que puedes usar para mejorar tu club. </li>
|
60 |
-
<li>Controla a tus jugadores en el campo usando botones de pantalla táctil o un joystick virtual. También puede cambiar entre diferentes ángulos de cámara y niveles de zoom para obtener una mejor vista de la acción. También puedes usar gestos para realizar acciones como pasar, disparar, abordar, regatear, etc.</li>
|
61 |
-
|
62 |
-
</ul>
|
63 |
-
<h2>Conclusión</h2>
|
64 |
-
<p>Vive le Football es un juego de gestión de fútbol móvil gratuito que te permite crear tu propio club y competir con otros jugadores en línea. El juego cuenta con gráficos realistas, física y animaciones, así como un sistema de clima dinámico que afecta el juego. También puedes chatear con otros jugadores y unirte a clubes para cooperar y socializar. Si quieres jugar Vive le Football en tu dispositivo Android, tendrás que descargar e instalar el archivo APK del juego desde una fuente de confianza. También puedes utilizar algunos consejos y trucos para mejorar tus habilidades y rendimiento en el juego. Vive le Football es un juego que atraerá a los aficionados al fútbol que quieren experimentar la emoción de administrar su propio club y jugar contra otros jugadores en línea. </p>
|
65 |
-
<h3>Preguntas frecuentes</h3>
|
66 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Vive le Football:</p>
|
67 |
-
<ol>
|
68 |
-
<li>¿Vive le Football es gratis? </li>
|
69 |
-
<p>Sí, Vive le Football es gratis. Sin embargo, algunas características y artículos pueden requerir dinero real para desbloquear o comprar. </p>
|
70 |
-
<li>¿Vive le Football está disponible para dispositivos iOS? </li>
|
71 |
-
<p>No, Vive le Football actualmente solo está disponible para dispositivos Android. No hay información oficial sobre si el juego será lanzado para dispositivos iOS en el futuro. </p>
|
72 |
-
<li>¿Cómo puedo contactar a los desarrolladores de Vive le Football? </li>
|
73 |
-
<p>Puede ponerse en contacto con los desarrolladores de Vive le Football enviando un correo electrónico a [email protected] o visitando su sitio web oficial. También puedes seguirlos en Facebook o Twitter para actualizaciones y noticias sobre el juego. </p>
|
74 |
-
<li>¿Cómo puedo reportar un error o un problema en Vive le Football? </li>
|
75 |
-
<p>Puede reportar un error o un problema en Vive le Football tocando el icono de configuración en la esquina superior derecha de la pantalla, luego tocando en la retroalimentación, luego tocando en el informe de error. También puede enviar un correo electrónico a [email protected] con una captura de pantalla o un vídeo del error o problema. </p>
|
76 |
-
|
77 |
-
<p>Puedes jugar con tus amigos en Vive le Football agregándolos como amigos en el juego. Puedes hacer esto tocando en el icono de amigos en la esquina inferior izquierda de la pantalla, luego tocando en agregar amigo, luego ingresando su nombre de usuario o ID. También puede invitarlos a unirse a su club o jugar un partido amistoso con ellos. También puede chatear con ellos en el juego o enviarles regalos. </p> 64aa2da5cf<br />
|
78 |
-
<br />
|
79 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BetterAPI/BetterChat/src/lib/utils/concatUint8Arrays.ts
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
import { sum } from "./sum";
|
2 |
-
|
3 |
-
export function concatUint8Arrays(arrays: Uint8Array[]): Uint8Array {
|
4 |
-
const totalLength = sum(arrays.map((a) => a.length));
|
5 |
-
const result = new Uint8Array(totalLength);
|
6 |
-
let offset = 0;
|
7 |
-
for (const array of arrays) {
|
8 |
-
result.set(array, offset);
|
9 |
-
offset += array.length;
|
10 |
-
}
|
11 |
-
return result;
|
12 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BiTransSciencia/www/README.md
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Www
|
3 |
-
emoji: 🐨
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: green
|
6 |
-
sdk: static
|
7 |
-
pinned: false
|
8 |
-
---
|
9 |
-
|
10 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/throttling.py
DELETED
@@ -1,55 +0,0 @@
|
|
1 |
-
from collections import namedtuple
|
2 |
-
|
3 |
-
CubicParams = namedtuple('CubicParams', ['w_max', 'k', 'last_fail'])
|
4 |
-
|
5 |
-
|
6 |
-
class CubicCalculator:
|
7 |
-
_SCALE_CONSTANT = 0.4
|
8 |
-
_BETA = 0.7
|
9 |
-
|
10 |
-
def __init__(
|
11 |
-
self,
|
12 |
-
starting_max_rate,
|
13 |
-
start_time,
|
14 |
-
scale_constant=_SCALE_CONSTANT,
|
15 |
-
beta=_BETA,
|
16 |
-
):
|
17 |
-
self._w_max = starting_max_rate
|
18 |
-
self._scale_constant = scale_constant
|
19 |
-
self._beta = beta
|
20 |
-
self._k = self._calculate_zero_point()
|
21 |
-
self._last_fail = start_time
|
22 |
-
|
23 |
-
def _calculate_zero_point(self):
|
24 |
-
scaled_value = (self._w_max * (1 - self._beta)) / self._scale_constant
|
25 |
-
k = scaled_value ** (1 / 3.0)
|
26 |
-
return k
|
27 |
-
|
28 |
-
def success_received(self, timestamp):
|
29 |
-
dt = timestamp - self._last_fail
|
30 |
-
new_rate = self._scale_constant * (dt - self._k) ** 3 + self._w_max
|
31 |
-
return new_rate
|
32 |
-
|
33 |
-
def error_received(self, current_rate, timestamp):
|
34 |
-
# Consider not having this be the current measured rate.
|
35 |
-
|
36 |
-
# We have a new max rate, which is the current rate we were sending
|
37 |
-
# at when we received an error response.
|
38 |
-
self._w_max = current_rate
|
39 |
-
self._k = self._calculate_zero_point()
|
40 |
-
self._last_fail = timestamp
|
41 |
-
return current_rate * self._beta
|
42 |
-
|
43 |
-
def get_params_snapshot(self):
|
44 |
-
"""Return a read-only object of the current cubic parameters.
|
45 |
-
|
46 |
-
These parameters are intended to be used for debug/troubleshooting
|
47 |
-
purposes. These object is a read-only snapshot and cannot be used
|
48 |
-
to modify the behavior of the CUBIC calculations.
|
49 |
-
|
50 |
-
New parameters may be added to this object in the future.
|
51 |
-
|
52 |
-
"""
|
53 |
-
return CubicParams(
|
54 |
-
w_max=self._w_max, k=self._k, last_fail=self._last_fail
|
55 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/packaging/_musllinux.py
DELETED
@@ -1,136 +0,0 @@
|
|
1 |
-
"""PEP 656 support.
|
2 |
-
|
3 |
-
This module implements logic to detect if the currently running Python is
|
4 |
-
linked against musl, and what musl version is used.
|
5 |
-
"""
|
6 |
-
|
7 |
-
import contextlib
|
8 |
-
import functools
|
9 |
-
import operator
|
10 |
-
import os
|
11 |
-
import re
|
12 |
-
import struct
|
13 |
-
import subprocess
|
14 |
-
import sys
|
15 |
-
from typing import IO, Iterator, NamedTuple, Optional, Tuple
|
16 |
-
|
17 |
-
|
18 |
-
def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
|
19 |
-
return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
|
20 |
-
|
21 |
-
|
22 |
-
def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
|
23 |
-
"""Detect musl libc location by parsing the Python executable.
|
24 |
-
|
25 |
-
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
|
26 |
-
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
|
27 |
-
"""
|
28 |
-
f.seek(0)
|
29 |
-
try:
|
30 |
-
ident = _read_unpacked(f, "16B")
|
31 |
-
except struct.error:
|
32 |
-
return None
|
33 |
-
if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
|
34 |
-
return None
|
35 |
-
f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
|
36 |
-
|
37 |
-
try:
|
38 |
-
# e_fmt: Format for program header.
|
39 |
-
# p_fmt: Format for section header.
|
40 |
-
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
|
41 |
-
e_fmt, p_fmt, p_idx = {
|
42 |
-
1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
|
43 |
-
2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
|
44 |
-
}[ident[4]]
|
45 |
-
except KeyError:
|
46 |
-
return None
|
47 |
-
else:
|
48 |
-
p_get = operator.itemgetter(*p_idx)
|
49 |
-
|
50 |
-
# Find the interpreter section and return its content.
|
51 |
-
try:
|
52 |
-
_, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
|
53 |
-
except struct.error:
|
54 |
-
return None
|
55 |
-
for i in range(e_phnum + 1):
|
56 |
-
f.seek(e_phoff + e_phentsize * i)
|
57 |
-
try:
|
58 |
-
p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
|
59 |
-
except struct.error:
|
60 |
-
return None
|
61 |
-
if p_type != 3: # Not PT_INTERP.
|
62 |
-
continue
|
63 |
-
f.seek(p_offset)
|
64 |
-
interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
|
65 |
-
if "musl" not in interpreter:
|
66 |
-
return None
|
67 |
-
return interpreter
|
68 |
-
return None
|
69 |
-
|
70 |
-
|
71 |
-
class _MuslVersion(NamedTuple):
|
72 |
-
major: int
|
73 |
-
minor: int
|
74 |
-
|
75 |
-
|
76 |
-
def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
|
77 |
-
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
|
78 |
-
if len(lines) < 2 or lines[0][:4] != "musl":
|
79 |
-
return None
|
80 |
-
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
|
81 |
-
if not m:
|
82 |
-
return None
|
83 |
-
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
|
84 |
-
|
85 |
-
|
86 |
-
@functools.lru_cache()
|
87 |
-
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
|
88 |
-
"""Detect currently-running musl runtime version.
|
89 |
-
|
90 |
-
This is done by checking the specified executable's dynamic linking
|
91 |
-
information, and invoking the loader to parse its output for a version
|
92 |
-
string. If the loader is musl, the output would be something like::
|
93 |
-
|
94 |
-
musl libc (x86_64)
|
95 |
-
Version 1.2.2
|
96 |
-
Dynamic Program Loader
|
97 |
-
"""
|
98 |
-
with contextlib.ExitStack() as stack:
|
99 |
-
try:
|
100 |
-
f = stack.enter_context(open(executable, "rb"))
|
101 |
-
except OSError:
|
102 |
-
return None
|
103 |
-
ld = _parse_ld_musl_from_elf(f)
|
104 |
-
if not ld:
|
105 |
-
return None
|
106 |
-
proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
|
107 |
-
return _parse_musl_version(proc.stderr)
|
108 |
-
|
109 |
-
|
110 |
-
def platform_tags(arch: str) -> Iterator[str]:
|
111 |
-
"""Generate musllinux tags compatible to the current platform.
|
112 |
-
|
113 |
-
:param arch: Should be the part of platform tag after the ``linux_``
|
114 |
-
prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
|
115 |
-
prerequisite for the current platform to be musllinux-compatible.
|
116 |
-
|
117 |
-
:returns: An iterator of compatible musllinux tags.
|
118 |
-
"""
|
119 |
-
sys_musl = _get_musl_version(sys.executable)
|
120 |
-
if sys_musl is None: # Python not dynamically linked against musl.
|
121 |
-
return
|
122 |
-
for minor in range(sys_musl.minor, -1, -1):
|
123 |
-
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
|
124 |
-
|
125 |
-
|
126 |
-
if __name__ == "__main__": # pragma: no cover
|
127 |
-
import sysconfig
|
128 |
-
|
129 |
-
plat = sysconfig.get_platform()
|
130 |
-
assert plat.startswith("linux-"), "not linux"
|
131 |
-
|
132 |
-
print("plat:", plat)
|
133 |
-
print("musl:", _get_musl_version(sys.executable))
|
134 |
-
print("tags:", end=" ")
|
135 |
-
for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
|
136 |
-
print(t, end="\n ")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/helpers.py
DELETED
@@ -1,1088 +0,0 @@
|
|
1 |
-
# helpers.py
|
2 |
-
import html.entities
|
3 |
-
import re
|
4 |
-
import typing
|
5 |
-
|
6 |
-
from . import __diag__
|
7 |
-
from .core import *
|
8 |
-
from .util import _bslash, _flatten, _escape_regex_range_chars
|
9 |
-
|
10 |
-
|
11 |
-
#
|
12 |
-
# global helpers
|
13 |
-
#
|
14 |
-
def delimited_list(
|
15 |
-
expr: Union[str, ParserElement],
|
16 |
-
delim: Union[str, ParserElement] = ",",
|
17 |
-
combine: bool = False,
|
18 |
-
min: typing.Optional[int] = None,
|
19 |
-
max: typing.Optional[int] = None,
|
20 |
-
*,
|
21 |
-
allow_trailing_delim: bool = False,
|
22 |
-
) -> ParserElement:
|
23 |
-
"""Helper to define a delimited list of expressions - the delimiter
|
24 |
-
defaults to ','. By default, the list elements and delimiters can
|
25 |
-
have intervening whitespace, and comments, but this can be
|
26 |
-
overridden by passing ``combine=True`` in the constructor. If
|
27 |
-
``combine`` is set to ``True``, the matching tokens are
|
28 |
-
returned as a single token string, with the delimiters included;
|
29 |
-
otherwise, the matching tokens are returned as a list of tokens,
|
30 |
-
with the delimiters suppressed.
|
31 |
-
|
32 |
-
If ``allow_trailing_delim`` is set to True, then the list may end with
|
33 |
-
a delimiter.
|
34 |
-
|
35 |
-
Example::
|
36 |
-
|
37 |
-
delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
|
38 |
-
delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
|
39 |
-
"""
|
40 |
-
if isinstance(expr, str_type):
|
41 |
-
expr = ParserElement._literalStringClass(expr)
|
42 |
-
|
43 |
-
dlName = "{expr} [{delim} {expr}]...{end}".format(
|
44 |
-
expr=str(expr.copy().streamline()),
|
45 |
-
delim=str(delim),
|
46 |
-
end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
|
47 |
-
)
|
48 |
-
|
49 |
-
if not combine:
|
50 |
-
delim = Suppress(delim)
|
51 |
-
|
52 |
-
if min is not None:
|
53 |
-
if min < 1:
|
54 |
-
raise ValueError("min must be greater than 0")
|
55 |
-
min -= 1
|
56 |
-
if max is not None:
|
57 |
-
if min is not None and max <= min:
|
58 |
-
raise ValueError("max must be greater than, or equal to min")
|
59 |
-
max -= 1
|
60 |
-
delimited_list_expr = expr + (delim + expr)[min, max]
|
61 |
-
|
62 |
-
if allow_trailing_delim:
|
63 |
-
delimited_list_expr += Opt(delim)
|
64 |
-
|
65 |
-
if combine:
|
66 |
-
return Combine(delimited_list_expr).set_name(dlName)
|
67 |
-
else:
|
68 |
-
return delimited_list_expr.set_name(dlName)
|
69 |
-
|
70 |
-
|
71 |
-
def counted_array(
    expr: ParserElement,
    int_expr: typing.Optional[ParserElement] = None,
    *,
    intExpr: typing.Optional[ParserElement] = None,
) -> ParserElement:
    """Helper to define a counted list of expressions.

    This helper defines a pattern of the form::

        integer expr expr expr...

    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the
    leading count token is suppressed.

    If ``int_expr`` is specified, it should be a pyparsing expression
    that produces an integer value.

    Example::

        counted_array(Word(alphas)).parse_string('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
        counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef')  # -> ['ab', 'cd']

        # if other fields must be parsed after the count but before the
        # list items, give the fields results names and they will
        # be preserved in the returned ParseResults:
        count_with_metadata = integer + Word(alphas)("type")
        typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
        result = typed_array.parse_string("3 bool True True False")
        print(result.dump())

        # prints
        # ['True', 'True', 'False']
        # - items: ['True', 'True', 'False']
        # - type: 'bool'
    """
    # pre-PEP8 keyword argument wins if both are given, for backward compatibility
    intExpr = intExpr or int_expr
    # placeholder for "expr repeated n times"; bound at parse time, once the
    # actual count value has been read from the input
    array_expr = Forward()

    def count_field_parse_action(s, l, t):
        # rebind the Forward to exactly n copies of expr (Empty if the count is 0)
        nonlocal array_expr
        n = t[0]
        array_expr <<= (expr * n) if n else Empty()
        # clear list contents, but keep any named results
        del t[:]

    if intExpr is None:
        # default count expression: a plain decimal integer
        intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
    else:
        # copy so that attaching our parse action below does not mutate the
        # caller's expression
        intExpr = intExpr.copy()
    intExpr.set_name("arrayLen")
    # call_during_try=True so that lookahead/try attempts also bind the count
    intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
    return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
129 |
-
|
130 |
-
|
131 |
-
def match_previous_literal(expr: ParserElement) -> ParserElement:
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression. For example::

        first = Word(nums)
        second = match_previous_literal(first)
        match_expr = first + ":" + second

    will match ``"1:1"``, but not ``"1:2"``.  Because this
    matches a previous literal, will also match the leading
    ``"1:1"`` in ``"1:10"``. If this is not desired, use
    :class:`match_previous_expr`. Do *not* use with packrat parsing
    enabled.
    """
    # Forward that gets rebound every time ``expr`` matches
    rep = Forward()

    def copy_token_to_repeater(s, l, t):
        # bind the Forward to literal(s) for the tokens just matched by expr
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.as_list())
                rep << And(Literal(tt) for tt in tflat)
        else:
            # expr matched no tokens - the repeat matches nothing as well
            rep << Empty()

    # use the PEP8 keyword name for consistency with the rest of this module
    expr.add_parse_action(copy_token_to_repeater, call_during_try=True)
    rep.set_name("(prev) " + str(expr))
    return rep
162 |
-
|
163 |
-
|
164 |
-
def match_previous_expr(expr: ParserElement) -> ParserElement:
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression. For example::

        first = Word(nums)
        second = match_previous_expr(first)
        match_expr = first + ":" + second

    will match ``"1:1"``, but not ``"1:2"``.  Because this
    matches by expressions, will *not* match the leading ``"1:1"``
    in ``"1:10"``; the expressions are evaluated first, and then
    compared, so ``"1"`` is compared with ``"10"``. Do *not* use
    with packrat parsing enabled.
    """
    rep = Forward()
    # the repeat uses the same grammar as expr; a parse action attached at
    # parse time enforces that the matched tokens are identical
    e2 = expr.copy()
    rep <<= e2

    def copy_token_to_repeater(s, l, t):
        # capture the tokens expr just matched, flattened for comparison
        matchTokens = _flatten(t.as_list())

        def must_match_these_tokens(s, l, t):
            # fail the repeat unless its tokens equal the captured ones
            theseTokens = _flatten(t.as_list())
            if theseTokens != matchTokens:
                raise ParseException(
                    s, l, "Expected {}, found{}".format(matchTokens, theseTokens)
                )

        # use the PEP8 keyword name for consistency with the rest of this module
        rep.set_parse_action(must_match_these_tokens, call_during_try=True)

    expr.add_parse_action(copy_token_to_repeater, call_during_try=True)
    rep.set_name("(prev) " + str(expr))
    return rep
198 |
-
|
199 |
-
|
200 |
-
def one_of(
    strs: Union[typing.Iterable[str], str],
    caseless: bool = False,
    use_regex: bool = True,
    as_keyword: bool = False,
    *,
    useRegex: bool = True,
    asKeyword: bool = False,
) -> ParserElement:
    """Helper to quickly define a set of alternative :class:`Literal` s,
    and makes sure to do longest-first testing when there is a conflict,
    regardless of the input order, but returns
    a :class:`MatchFirst` for best performance.

    Parameters:

    - ``strs`` - a string of space-delimited literals, or a collection of
      string literals
    - ``caseless`` - treat all literals as caseless - (default= ``False``)
    - ``use_regex`` - as an optimization, will
      generate a :class:`Regex` object; otherwise, will generate
      a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
      creating a :class:`Regex` raises an exception) - (default= ``True``)
    - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
      generated expressions - (default= ``False``)
    - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
      but will be removed in a future release

    Example::

        comp_oper = one_of("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.search_string("B = 12  AA=23 B<=AA AA>12"))

    prints::

        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    # merge PEP8 and pre-PEP8 argument spellings (pre-PEP8 names are keyword-only)
    asKeyword = asKeyword or as_keyword
    useRegex = useRegex and use_regex

    # a string passed for ``caseless`` almost certainly means the caller passed
    # multiple string choices positionally instead of one space-delimited string
    if (
        isinstance(caseless, str_type)
        and __diag__.warn_on_multiple_string_args_to_oneof
    ):
        warnings.warn(
            "More than one string argument passed to one_of, pass"
            " choices as a list or space-delimited string",
            stacklevel=2,
        )

    if caseless:
        isequal = lambda a, b: a.upper() == b.upper()
        # masks(a, b): True if symbol ``a`` would match a prefix of symbol ``b``
        masks = lambda a, b: b.upper().startswith(a.upper())
        parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
    else:
        isequal = lambda a, b: a == b
        masks = lambda a, b: b.startswith(a)
        parseElementClass = Keyword if asKeyword else Literal

    symbols: List[str] = []
    if isinstance(strs, str_type):
        symbols = strs.split()
    elif isinstance(strs, Iterable):
        symbols = list(strs)
    else:
        raise TypeError("Invalid argument to one_of, expected string or iterable")
    if not symbols:
        return NoMatch()

    # reorder given symbols to take care to avoid masking longer choices with shorter ones
    # (but only if the given symbols are not just single characters)
    if any(len(sym) > 1 for sym in symbols):
        i = 0
        while i < len(symbols) - 1:
            cur = symbols[i]
            for j, other in enumerate(symbols[i + 1 :]):
                if isequal(other, cur):
                    # drop exact duplicate of the current symbol
                    del symbols[i + j + 1]
                    break
                elif masks(cur, other):
                    # a longer symbol would be masked by ``cur``; move it ahead
                    del symbols[i + j + 1]
                    symbols.insert(i, other)
                    break
            else:
                i += 1

    if useRegex:
        re_flags: int = re.IGNORECASE if caseless else 0

        try:
            if all(len(sym) == 1 for sym in symbols):
                # symbols are just single characters, create range regex pattern
                patt = "[{}]".format(
                    "".join(_escape_regex_range_chars(sym) for sym in symbols)
                )
            else:
                patt = "|".join(re.escape(sym) for sym in symbols)

            # wrap with \b word break markers if defining as keywords
            if asKeyword:
                patt = r"\b(?:{})\b".format(patt)

            ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))

            if caseless:
                # add parse action to return symbols as specified, not in random
                # casing as found in input string
                symbol_map = {sym.lower(): sym for sym in symbols}
                ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])

            return ret

        except re.error:
            warnings.warn(
                "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
            )

    # last resort, just use MatchFirst
    return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
        " | ".join(symbols)
    )
325 |
-
|
326 |
-
|
327 |
-
def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
    """Helper for concisely building a :class:`Dict` from separate key and
    value patterns. Wraps the given expressions in the :class:`Dict`,
    :class:`ZeroOrMore`, and :class:`Group` combination that :class:`Dict`
    requires, so the caller does not have to. Any delimiting punctuation in
    the key pattern should be suppressed, leaving just the key text; the
    value pattern may define results names, which will show up as named
    fields in the :class:`Dict` results.

    Example::

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
        print(attr_expr[1, ...].parse_string(text).dump())

        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)

        # similar to Dict, but simpler call format
        result = dict_of(attr_label, attr_value).parse_string(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.as_dict())

    prints::

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: 'light blue'
        - posn: 'upper left'
        - shape: 'SQUARE'
        - texture: 'burlap'
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    """
    # each grouped key/value pair becomes one entry in the resulting Dict
    key_value_pair = Group(key + value)
    return Dict(OneOrMore(key_value_pair))
365 |
-
|
366 |
-
|
367 |
-
def original_text_for(
    expr: ParserElement, as_string: bool = True, *, asString: bool = True
) -> ParserElement:
    """Helper to return the original, untokenized text for a given
    expression.  Useful to restore the parsed fields of an HTML start
    tag into the raw tag text itself, or to revert separate tokens with
    intervening whitespace back to the original matching input text. By
    default, returns astring containing the original parsed text.

    If the optional ``as_string`` argument is passed as
    ``False``, then the return value is
    a :class:`ParseResults` containing any results names that
    were originally matched, and a single token containing the original
    matched text from the input string.  So if the expression passed to
    :class:`original_text_for` contains expressions with defined
    results names, you must set ``as_string`` to ``False`` if you
    want to preserve those results name values.

    The ``asString`` pre-PEP8 argument is retained for compatibility,
    but will be removed in a future release.

    Example::

        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b", "i"):
            opener, closer = make_html_tags(tag)
            patt = original_text_for(opener + SkipTo(closer) + closer)
            print(patt.search_string(src)[0])

    prints::

        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    """
    # pre-PEP8 asString keyword, if explicitly set False, takes effect
    asString = asString and as_string

    # zero-width markers that record the input location before and after expr
    locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
    endlocMarker = locMarker.copy()
    # skip the usual pre-parse whitespace handling so the end marker records
    # the position immediately after the matched text
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        # replace all tokens with the raw slice of the input string
        extractText = lambda s, l, t: s[t._original_start : t._original_end]
    else:

        def extractText(s, l, t):
            # keep named results; replace list contents with the raw slice and
            # pop the internal marker names so they do not leak to callers
            t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]

    matchExpr.set_parse_action(extractText)
    # propagate ignore expressions (comments etc.) from the wrapped expr
    matchExpr.ignoreExprs = expr.ignoreExprs
    matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
    return matchExpr
418 |
-
|
419 |
-
|
420 |
-
def ungroup(expr: ParserElement) -> ParserElement:
    """Helper that strips one level of pyparsing's default grouping from
    And expressions, even if all but one of the grouped terms are empty.
    """
    # a parse action on a TokenConverter wrapper pulls out the single
    # inner group, leaving its contents at the top level
    unwrap_first = lambda t: t[0]
    return TokenConverter(expr).add_parse_action(unwrap_first)
425 |
-
|
426 |
-
|
427 |
-
def locatedExpr(expr: ParserElement) -> ParserElement:
    """
    (DEPRECATED - future code should use the Located class)
    Helper to decorate a returned token with its starting and ending
    locations in the input string.

    This helper adds the following results names:

    - ``locn_start`` - location where matched expression begins
    - ``locn_end`` - location where matched expression ends
    - ``value`` - the actual parsed results

    Be careful if the input text contains ``<TAB>`` characters, you
    may want to call :class:`ParserElement.parseWithTabs`

    Example::

        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)

    prints::

        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    """
    # zero-width expression whose parse action yields the current location
    loc_probe = Empty().set_parse_action(lambda ss, ll, tt: ll)
    # the trailing probe must not skip whitespace, so it records the position
    # immediately after the matched value
    end_probe = loc_probe.copy().leaveWhitespace()
    wrapped = loc_probe("locn_start") + expr("value") + end_probe("locn_end")
    return Group(wrapped)
460 |
-
|
461 |
-
|
462 |
-
def nested_expr(
    opener: Union[str, ParserElement] = "(",
    closer: Union[str, ParserElement] = ")",
    content: typing.Optional[ParserElement] = None,
    ignore_expr: ParserElement = quoted_string(),
    *,
    ignoreExpr: ParserElement = quoted_string(),
) -> ParserElement:
    """Helper method for defining nested lists enclosed in opening and
    closing delimiters (``"("`` and ``")"`` are the default).

    Parameters:
    - ``opener`` - opening character for a nested list
      (default= ``"("``); can also be a pyparsing expression
    - ``closer`` - closing character for a nested list
      (default= ``")"``); can also be a pyparsing expression
    - ``content`` - expression for items within the nested lists
      (default= ``None``)
    - ``ignore_expr`` - expression for ignoring opening and closing delimiters
      (default= :class:`quoted_string`)
    - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
      but will be removed in a future release

    If an expression is not provided for the content argument, the
    nested expression will capture all whitespace-delimited content
    between delimiters as a list of separate values.

    Use the ``ignore_expr`` argument to define expressions that may
    contain opening or closing characters that should not be treated as
    opening or closing characters for nesting, such as quoted_string or
    a comment expression.  Specify multiple expressions using an
    :class:`Or` or :class:`MatchFirst`. The default is
    :class:`quoted_string`, but if no expressions are to be ignored, then
    pass ``None`` for this argument.

    Example::

        data_type = one_of("void int short long char float double")
        decl_data_type = Combine(data_type + Opt(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR, RPAR = map(Suppress, "()")

        code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))

        c_function = (decl_data_type("type")
                      + ident("name")
                      + LPAR + Opt(delimited_list(arg), [])("args") + RPAR
                      + code_body("body"))
        c_function.ignore(c_style_comment)

        source_code = '''
            int is_odd(int x) {
                return (x%2);
            }

            int dec_to_hex(char hchar) {
                if (hchar >= '0' && hchar <= '9') {
                    return (ord(hchar)-ord('0'));
                } else {
                    return (10+ord(hchar)-ord('A'));
                }
            }
        '''
        for func in c_function.search_string(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)


    prints::

        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    """
    # reconcile the PEP8 and pre-PEP8 ignore arguments: if they differ, prefer
    # ignore_expr when ignoreExpr was left at its default
    # NOTE(review): both defaults are fresh quoted_string() instances created at
    # def time; the equality comparisons here rely on ParserElement comparison
    # semantics - confirm before changing
    if ignoreExpr != ignore_expr:
        ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # build a default content expression from the delimiter characters
        if isinstance(opener, str_type) and isinstance(closer, str_type):
            if len(opener) == 1 and len(closer) == 1:
                # single-character delimiters: content is any run of characters
                # that are not delimiters or whitespace
                if ignoreExpr is not None:
                    content = Combine(
                        OneOrMore(
                            ~ignoreExpr
                            + CharsNotIn(
                                opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
                                exact=1,
                            )
                        )
                    ).set_parse_action(lambda t: t[0].strip())
                else:
                    content = empty.copy() + CharsNotIn(
                        opener + closer + ParserElement.DEFAULT_WHITE_CHARS
                    ).set_parse_action(lambda t: t[0].strip())
            else:
                # multi-character delimiters: use negative lookahead on the
                # delimiter literals, one character at a time
                if ignoreExpr is not None:
                    content = Combine(
                        OneOrMore(
                            ~ignoreExpr
                            + ~Literal(opener)
                            + ~Literal(closer)
                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                        )
                    ).set_parse_action(lambda t: t[0].strip())
                else:
                    content = Combine(
                        OneOrMore(
                            ~Literal(opener)
                            + ~Literal(closer)
                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                        )
                    ).set_parse_action(lambda t: t[0].strip())
        else:
            raise ValueError(
                "opening and closing arguments must be strings if no content expression is given"
            )
    # recursive definition: a nested list may contain further nested lists
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group(
            Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
        )
    else:
        ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
    ret.set_name("nested %s%s expression" % (opener, closer))
    return ret
588 |
-
|
589 |
-
|
590 |
-
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr, str_type):
        resname = tagStr
        # HTML tags match caselessly; XML tags match exactly
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        # a ParserElement was passed; use its name for the results names below
        resname = tagStr.name

    # attribute names may include '-' and ':' (namespaced attributes)
    tagAttrName = Word(alphas, alphanums + "_-:")
    if xml:
        # XML: attribute values must be double-quoted; every attribute has a value
        tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
        openTag = (
            suppress_LT
            + tagStr("tag")
            + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
            # trailing "/" marks a self-closing tag; exposed as boolean "empty"
            + Opt("/", default=[False])("empty").set_parse_action(
                lambda s, l, t: t[0] == "/"
            )
            + suppress_GT
        )
    else:
        # HTML: values may be quoted or bare (any printable except '>')
        tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
            printables, exclude_chars=">"
        )
        openTag = (
            suppress_LT
            + tagStr("tag")
            + Dict(
                ZeroOrMore(
                    Group(
                        # HTML attribute names are case-insensitive; normalize to lower
                        tagAttrName.set_parse_action(lambda t: t[0].lower())
                        # HTML allows valueless attributes, so '=value' is optional
                        + Opt(Suppress("=") + tagAttrValue)
                    )
                )
            )
            + Opt("/", default=[False])("empty").set_parse_action(
                lambda s, l, t: t[0] == "/"
            )
            + suppress_GT
        )
    # adjacent=False allows whitespace between "</", the tag name, and ">"
    closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)

    openTag.set_name("<%s>" % resname)
    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
    openTag.add_parse_action(
        lambda t: t.__setitem__(
            "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
        )
    )
    closeTag = closeTag(
        "end" + "".join(resname.replace(":", " ").title().split())
    ).set_name("</%s>" % resname)
    # expose the tag name and a convenience expression for the tag body
    openTag.tag = resname
    closeTag.tag = resname
    openTag.tag_body = SkipTo(closeTag())
    return openTag, closeTag
646 |
-
|
647 |
-
|
648 |
-
def make_html_tags(
    tag_str: Union[str, ParserElement]
) -> Tuple[ParserElement, ParserElement]:
    """Helper to construct opening and closing tag expressions for HTML,
    given a tag name. Matches tags in either upper or lower case,
    attributes with namespaces and with quoted or unquoted values.

    Example::

        text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
        # make_html_tags returns pyparsing expressions for the opening and
        # closing tags as a 2-tuple
        a, a_end = make_html_tags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end

        for link in link_expr.search_string(text):
            # attributes in the <A> tag (like "href" shown here) are
            # also accessible as named results
            print(link.link_text, '->', link.href)

    prints::

        pyparsing -> https://github.com/pyparsing/pyparsing/wiki
    """
    # xml=False selects the caseless, loosely-quoted HTML matching rules
    open_tag, close_tag = _makeTags(tag_str, False)
    return open_tag, close_tag
673 |
-
|
674 |
-
|
675 |
-
def make_xml_tags(
    tag_str: Union[str, ParserElement]
) -> Tuple[ParserElement, ParserElement]:
    """Helper to construct opening and closing tag expressions for XML,
    given a tag name. Matches tags only in the given upper/lower case.

    Example: similar to :class:`make_html_tags`
    """
    # xml=True selects exact-case matching with double-quoted attribute values
    open_tag, close_tag = _makeTags(tag_str, True)
    return open_tag, close_tag
684 |
-
|
685 |
-
|
686 |
-
# module-level convenience expressions matching any HTML open/close tag
any_open_tag: ParserElement
any_close_tag: ParserElement
any_open_tag, any_close_tag = make_html_tags(
    # tag names: letters, then letters/digits/'_'/':' (namespaced names)
    Word(alphas, alphanums + "_:").set_name("any tag")
)

# map HTML5 entity names (sans trailing ';') to their replacement characters
_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
# matches any entity reference like "&amp;", capturing the name as "entity"
common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
    "common HTML entity"
)
696 |
-
|
697 |
-
|
698 |
-
def replace_html_entity(t):
    """Helper parser action to replace common HTML entities with their special characters"""
    # the "entity" results name is set by the common_html_entity regex
    entity_name = t.entity
    return _htmlEntityMap.get(entity_name)
701 |
-
|
702 |
-
|
703 |
-
class OpAssoc(Enum):
    # operator associativity selector used in infix_notation operator specs
    LEFT = 1
    RIGHT = 2
706 |
-
|
707 |
-
|
708 |
-
# the operator expression portion of an infix_notation spec: a parser element,
# a string (converted to a Literal), or - for ternary operators - a 2-tuple of
# the two separating operator expressions
InfixNotationOperatorArgType = Union[
    ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
]
# one precedence level for infix_notation:
# (op_expr, num_operands, associativity[, parse_action])
InfixNotationOperatorSpec = Union[
    Tuple[
        InfixNotationOperatorArgType,
        int,
        OpAssoc,
        typing.Optional[ParseAction],
    ],
    Tuple[
        InfixNotationOperatorArgType,
        int,
        OpAssoc,
    ],
]
|
724 |
-
|
725 |
-
|
726 |
-
def infix_notation(
    base_expr: ParserElement,
    op_list: List[InfixNotationOperatorSpec],
    lpar: Union[str, ParserElement] = Suppress("("),
    rpar: Union[str, ParserElement] = Suppress(")"),
) -> ParserElement:
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary
    or binary, left- or right-associative.  Parse actions can also be
    attached to operator expressions. The generated parser will also
    recognize the use of parentheses to override operator precedences
    (see example below).

    Note: if you define a deep operator list, you may see performance
    issues when using infix_notation. See
    :class:`ParserElement.enable_packrat` for a mechanism to potentially
    improve your parser performance.

    Parameters:
    - ``base_expr`` - expression representing the most basic operand to
      be used in the expression
    - ``op_list`` - list of tuples, one for each operator precedence level
      in the expression grammar; each tuple is of the form ``(op_expr,
      num_operands, right_left_assoc, (optional)parse_action)``, where:

      - ``op_expr`` is the pyparsing expression for the operator; may also
        be a string, which will be converted to a Literal; if ``num_operands``
        is 3, ``op_expr`` is a tuple of two expressions, for the two
        operators separating the 3 terms
      - ``num_operands`` is the number of terms for this operator (must be 1,
        2, or 3)
      - ``right_left_assoc`` is the indicator whether the operator is right
        or left associative, using the pyparsing-defined constants
        ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
      - ``parse_action`` is the parse action to be associated with
        expressions matching this operator expression (the parse action
        tuple member may be omitted); if the parse action is passed
        a tuple or list of functions, this is equivalent to calling
        ``set_parse_action(*fn)``
        (:class:`ParserElement.set_parse_action`)
    - ``lpar`` - expression for matching left-parentheses; if passed as a
      str, then will be parsed as Suppress(lpar). If lpar is passed as
      an expression (such as ``Literal('(')``), then it will be kept in
      the parsed results, and grouped with them. (default= ``Suppress('(')``)
    - ``rpar`` - expression for matching right-parentheses; if passed as a
      str, then will be parsed as Suppress(rpar). If rpar is passed as
      an expression (such as ``Literal(')')``), then it will be kept in
      the parsed results, and grouped with them. (default= ``Suppress(')')``)

    Example::

        # simple example of four-function arithmetic with ints and
        # variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier

        arith_expr = infix_notation(integer | varname,
            [
            ('-', 1, OpAssoc.RIGHT),
            (one_of('* /'), 2, OpAssoc.LEFT),
            (one_of('+ -'), 2, OpAssoc.LEFT),
            ])

        arith_expr.run_tests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', full_dump=False)

    prints::

        5+3*6
        [[5, '+', [3, '*', 6]]]

        (5+3)*6
        [[[5, '+', 3], '*', 6]]

        -2--11
        [[['-', 2], '-', ['-', 11]]]
    """
    # captive version of FollowedBy that does not do parse actions or capture results names
    class _FB(FollowedBy):
        def parseImpl(self, instring, loc, doActions=True):
            self.expr.try_parse(instring, loc)
            return loc, []

    _FB.__name__ = "FollowedBy>"

    ret = Forward()
    if isinstance(lpar, str):
        lpar = Suppress(lpar)
    if isinstance(rpar, str):
        rpar = Suppress(rpar)

    # if lpar and rpar are not suppressed, wrap in group
    # (bug fix: the original tested ``rpar`` twice, so a non-suppressed
    # lpar with a suppressed rpar was not grouped)
    if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
        lastExpr = base_expr | Group(lpar + ret + rpar)
    else:
        lastExpr = base_expr | (lpar + ret + rpar)

    for i, operDef in enumerate(op_list):
        # unpack the spec; parse action member is optional
        opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
        if isinstance(opExpr, str_type):
            opExpr = ParserElement._literalStringClass(opExpr)
        if arity == 3:
            if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
                raise ValueError(
                    "if numterms=3, opExpr must be a tuple or list of two expressions"
                )
            opExpr1, opExpr2 = opExpr
            term_name = "{}{} term".format(opExpr1, opExpr2)
        else:
            term_name = "{} term".format(opExpr)

        if not 1 <= arity <= 3:
            raise ValueError("operator must be unary (1), binary (2), or ternary (3)")

        if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
            raise ValueError("operator must indicate right or left associativity")

        thisExpr: Forward = Forward().set_name(term_name)
        if rightLeftAssoc is OpAssoc.LEFT:
            if arity == 1:
                matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
                        lastExpr + (opExpr + lastExpr)[1, ...]
                    )
                else:
                    matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
            elif arity == 3:
                matchExpr = _FB(
                    lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
                ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
        elif rightLeftAssoc is OpAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Opt):
                    opExpr = Opt(opExpr)
                matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
                        lastExpr + (opExpr + thisExpr)[1, ...]
                    )
                else:
                    matchExpr = _FB(lastExpr + thisExpr) + Group(
                        lastExpr + thisExpr[1, ...]
                    )
            elif arity == 3:
                matchExpr = _FB(
                    lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
                ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
        if pa:
            if isinstance(pa, (tuple, list)):
                matchExpr.set_parse_action(*pa)
            else:
                matchExpr.set_parse_action(pa)
        # use PEP8 set_name for consistency with the rest of this module
        thisExpr <<= (matchExpr | lastExpr).set_name(term_name)
        lastExpr = thisExpr
    ret <<= lastExpr
    return ret
889 |
-
|
890 |
-
|
891 |
-
def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
|
892 |
-
"""
|
893 |
-
(DEPRECATED - use IndentedBlock class instead)
|
894 |
-
Helper method for defining space-delimited indentation blocks,
|
895 |
-
such as those used to define block statements in Python source code.
|
896 |
-
|
897 |
-
Parameters:
|
898 |
-
|
899 |
-
- ``blockStatementExpr`` - expression defining syntax of statement that
|
900 |
-
is repeated within the indented block
|
901 |
-
- ``indentStack`` - list created by caller to manage indentation stack
|
902 |
-
(multiple ``statementWithIndentedBlock`` expressions within a single
|
903 |
-
grammar should share a common ``indentStack``)
|
904 |
-
- ``indent`` - boolean indicating whether block must be indented beyond
|
905 |
-
the current level; set to ``False`` for block of left-most statements
|
906 |
-
(default= ``True``)
|
907 |
-
|
908 |
-
A valid block must contain at least one ``blockStatement``.
|
909 |
-
|
910 |
-
(Note that indentedBlock uses internal parse actions which make it
|
911 |
-
incompatible with packrat parsing.)
|
912 |
-
|
913 |
-
Example::
|
914 |
-
|
915 |
-
data = '''
|
916 |
-
def A(z):
|
917 |
-
A1
|
918 |
-
B = 100
|
919 |
-
G = A2
|
920 |
-
A2
|
921 |
-
A3
|
922 |
-
B
|
923 |
-
def BB(a,b,c):
|
924 |
-
BB1
|
925 |
-
def BBA():
|
926 |
-
bba1
|
927 |
-
bba2
|
928 |
-
bba3
|
929 |
-
C
|
930 |
-
D
|
931 |
-
def spam(x,y):
|
932 |
-
def eggs(z):
|
933 |
-
pass
|
934 |
-
'''
|
935 |
-
|
936 |
-
|
937 |
-
indentStack = [1]
|
938 |
-
stmt = Forward()
|
939 |
-
|
940 |
-
identifier = Word(alphas, alphanums)
|
941 |
-
funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
|
942 |
-
func_body = indentedBlock(stmt, indentStack)
|
943 |
-
funcDef = Group(funcDecl + func_body)
|
944 |
-
|
945 |
-
rvalue = Forward()
|
946 |
-
funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
|
947 |
-
rvalue << (funcCall | identifier | Word(nums))
|
948 |
-
assignment = Group(identifier + "=" + rvalue)
|
949 |
-
stmt << (funcDef | assignment | identifier)
|
950 |
-
|
951 |
-
module_body = stmt[1, ...]
|
952 |
-
|
953 |
-
parseTree = module_body.parseString(data)
|
954 |
-
parseTree.pprint()
|
955 |
-
|
956 |
-
prints::
|
957 |
-
|
958 |
-
[['def',
|
959 |
-
'A',
|
960 |
-
['(', 'z', ')'],
|
961 |
-
':',
|
962 |
-
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
|
963 |
-
'B',
|
964 |
-
['def',
|
965 |
-
'BB',
|
966 |
-
['(', 'a', 'b', 'c', ')'],
|
967 |
-
':',
|
968 |
-
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
|
969 |
-
'C',
|
970 |
-
'D',
|
971 |
-
['def',
|
972 |
-
'spam',
|
973 |
-
['(', 'x', 'y', ')'],
|
974 |
-
':',
|
975 |
-
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
|
976 |
-
"""
|
977 |
-
backup_stacks.append(indentStack[:])
|
978 |
-
|
979 |
-
def reset_stack():
|
980 |
-
indentStack[:] = backup_stacks[-1]
|
981 |
-
|
982 |
-
def checkPeerIndent(s, l, t):
|
983 |
-
if l >= len(s):
|
984 |
-
return
|
985 |
-
curCol = col(l, s)
|
986 |
-
if curCol != indentStack[-1]:
|
987 |
-
if curCol > indentStack[-1]:
|
988 |
-
raise ParseException(s, l, "illegal nesting")
|
989 |
-
raise ParseException(s, l, "not a peer entry")
|
990 |
-
|
991 |
-
def checkSubIndent(s, l, t):
|
992 |
-
curCol = col(l, s)
|
993 |
-
if curCol > indentStack[-1]:
|
994 |
-
indentStack.append(curCol)
|
995 |
-
else:
|
996 |
-
raise ParseException(s, l, "not a subentry")
|
997 |
-
|
998 |
-
def checkUnindent(s, l, t):
|
999 |
-
if l >= len(s):
|
1000 |
-
return
|
1001 |
-
curCol = col(l, s)
|
1002 |
-
if not (indentStack and curCol in indentStack):
|
1003 |
-
raise ParseException(s, l, "not an unindent")
|
1004 |
-
if curCol < indentStack[-1]:
|
1005 |
-
indentStack.pop()
|
1006 |
-
|
1007 |
-
NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
|
1008 |
-
INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
|
1009 |
-
PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
|
1010 |
-
UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
|
1011 |
-
if indent:
|
1012 |
-
smExpr = Group(
|
1013 |
-
Opt(NL)
|
1014 |
-
+ INDENT
|
1015 |
-
+ OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
|
1016 |
-
+ UNDENT
|
1017 |
-
)
|
1018 |
-
else:
|
1019 |
-
smExpr = Group(
|
1020 |
-
Opt(NL)
|
1021 |
-
+ OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
|
1022 |
-
+ Opt(UNDENT)
|
1023 |
-
)
|
1024 |
-
|
1025 |
-
# add a parse action to remove backup_stack from list of backups
|
1026 |
-
smExpr.add_parse_action(
|
1027 |
-
lambda: backup_stacks.pop(-1) and None if backup_stacks else None
|
1028 |
-
)
|
1029 |
-
smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
|
1030 |
-
blockStatementExpr.ignore(_bslash + LineEnd())
|
1031 |
-
return smExpr.set_name("indented block")
|
1032 |
-
|
1033 |
-
|
1034 |
-
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
|
1035 |
-
c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
|
1036 |
-
"C style comment"
|
1037 |
-
)
|
1038 |
-
"Comment of the form ``/* ... */``"
|
1039 |
-
|
1040 |
-
html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
|
1041 |
-
"Comment of the form ``<!-- ... -->``"
|
1042 |
-
|
1043 |
-
rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
|
1044 |
-
dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
|
1045 |
-
"Comment of the form ``// ... (to end of line)``"
|
1046 |
-
|
1047 |
-
cpp_style_comment = Combine(
|
1048 |
-
Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
|
1049 |
-
).set_name("C++ style comment")
|
1050 |
-
"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
|
1051 |
-
|
1052 |
-
java_style_comment = cpp_style_comment
|
1053 |
-
"Same as :class:`cpp_style_comment`"
|
1054 |
-
|
1055 |
-
python_style_comment = Regex(r"#.*").set_name("Python style comment")
|
1056 |
-
"Comment of the form ``# ... (to end of line)``"
|
1057 |
-
|
1058 |
-
|
1059 |
-
# build list of built-in expressions, for future reference if a global default value
|
1060 |
-
# gets updated
|
1061 |
-
_builtin_exprs: List[ParserElement] = [
|
1062 |
-
v for v in vars().values() if isinstance(v, ParserElement)
|
1063 |
-
]
|
1064 |
-
|
1065 |
-
|
1066 |
-
# pre-PEP8 compatible names
|
1067 |
-
delimitedList = delimited_list
|
1068 |
-
countedArray = counted_array
|
1069 |
-
matchPreviousLiteral = match_previous_literal
|
1070 |
-
matchPreviousExpr = match_previous_expr
|
1071 |
-
oneOf = one_of
|
1072 |
-
dictOf = dict_of
|
1073 |
-
originalTextFor = original_text_for
|
1074 |
-
nestedExpr = nested_expr
|
1075 |
-
makeHTMLTags = make_html_tags
|
1076 |
-
makeXMLTags = make_xml_tags
|
1077 |
-
anyOpenTag, anyCloseTag = any_open_tag, any_close_tag
|
1078 |
-
commonHTMLEntity = common_html_entity
|
1079 |
-
replaceHTMLEntity = replace_html_entity
|
1080 |
-
opAssoc = OpAssoc
|
1081 |
-
infixNotation = infix_notation
|
1082 |
-
cStyleComment = c_style_comment
|
1083 |
-
htmlComment = html_comment
|
1084 |
-
restOfLine = rest_of_line
|
1085 |
-
dblSlashComment = dbl_slash_comment
|
1086 |
-
cppStyleComment = cpp_style_comment
|
1087 |
-
javaStyleComment = java_style_comment
|
1088 |
-
pythonStyleComment = python_style_comment
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/appengine.py
DELETED
@@ -1,314 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
This module provides a pool manager that uses Google App Engine's
|
3 |
-
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
|
4 |
-
|
5 |
-
Example usage::
|
6 |
-
|
7 |
-
from urllib3 import PoolManager
|
8 |
-
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
|
9 |
-
|
10 |
-
if is_appengine_sandbox():
|
11 |
-
# AppEngineManager uses AppEngine's URLFetch API behind the scenes
|
12 |
-
http = AppEngineManager()
|
13 |
-
else:
|
14 |
-
# PoolManager uses a socket-level API behind the scenes
|
15 |
-
http = PoolManager()
|
16 |
-
|
17 |
-
r = http.request('GET', 'https://google.com/')
|
18 |
-
|
19 |
-
There are `limitations <https://cloud.google.com/appengine/docs/python/\
|
20 |
-
urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
|
21 |
-
the best choice for your application. There are three options for using
|
22 |
-
urllib3 on Google App Engine:
|
23 |
-
|
24 |
-
1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
|
25 |
-
cost-effective in many circumstances as long as your usage is within the
|
26 |
-
limitations.
|
27 |
-
2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
|
28 |
-
Sockets also have `limitations and restrictions
|
29 |
-
<https://cloud.google.com/appengine/docs/python/sockets/\
|
30 |
-
#limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
|
31 |
-
To use sockets, be sure to specify the following in your ``app.yaml``::
|
32 |
-
|
33 |
-
env_variables:
|
34 |
-
GAE_USE_SOCKETS_HTTPLIB : 'true'
|
35 |
-
|
36 |
-
3. If you are using `App Engine Flexible
|
37 |
-
<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
|
38 |
-
:class:`PoolManager` without any configuration or special environment variables.
|
39 |
-
"""
|
40 |
-
|
41 |
-
from __future__ import absolute_import
|
42 |
-
|
43 |
-
import io
|
44 |
-
import logging
|
45 |
-
import warnings
|
46 |
-
|
47 |
-
from ..exceptions import (
|
48 |
-
HTTPError,
|
49 |
-
HTTPWarning,
|
50 |
-
MaxRetryError,
|
51 |
-
ProtocolError,
|
52 |
-
SSLError,
|
53 |
-
TimeoutError,
|
54 |
-
)
|
55 |
-
from ..packages.six.moves.urllib.parse import urljoin
|
56 |
-
from ..request import RequestMethods
|
57 |
-
from ..response import HTTPResponse
|
58 |
-
from ..util.retry import Retry
|
59 |
-
from ..util.timeout import Timeout
|
60 |
-
from . import _appengine_environ
|
61 |
-
|
62 |
-
try:
|
63 |
-
from google.appengine.api import urlfetch
|
64 |
-
except ImportError:
|
65 |
-
urlfetch = None
|
66 |
-
|
67 |
-
|
68 |
-
log = logging.getLogger(__name__)
|
69 |
-
|
70 |
-
|
71 |
-
class AppEnginePlatformWarning(HTTPWarning):
|
72 |
-
pass
|
73 |
-
|
74 |
-
|
75 |
-
class AppEnginePlatformError(HTTPError):
|
76 |
-
pass
|
77 |
-
|
78 |
-
|
79 |
-
class AppEngineManager(RequestMethods):
|
80 |
-
"""
|
81 |
-
Connection manager for Google App Engine sandbox applications.
|
82 |
-
|
83 |
-
This manager uses the URLFetch service directly instead of using the
|
84 |
-
emulated httplib, and is subject to URLFetch limitations as described in
|
85 |
-
the App Engine documentation `here
|
86 |
-
<https://cloud.google.com/appengine/docs/python/urlfetch>`_.
|
87 |
-
|
88 |
-
Notably it will raise an :class:`AppEnginePlatformError` if:
|
89 |
-
* URLFetch is not available.
|
90 |
-
* If you attempt to use this on App Engine Flexible, as full socket
|
91 |
-
support is available.
|
92 |
-
* If a request size is more than 10 megabytes.
|
93 |
-
* If a response size is more than 32 megabytes.
|
94 |
-
* If you use an unsupported request method such as OPTIONS.
|
95 |
-
|
96 |
-
Beyond those cases, it will raise normal urllib3 errors.
|
97 |
-
"""
|
98 |
-
|
99 |
-
def __init__(
|
100 |
-
self,
|
101 |
-
headers=None,
|
102 |
-
retries=None,
|
103 |
-
validate_certificate=True,
|
104 |
-
urlfetch_retries=True,
|
105 |
-
):
|
106 |
-
if not urlfetch:
|
107 |
-
raise AppEnginePlatformError(
|
108 |
-
"URLFetch is not available in this environment."
|
109 |
-
)
|
110 |
-
|
111 |
-
warnings.warn(
|
112 |
-
"urllib3 is using URLFetch on Google App Engine sandbox instead "
|
113 |
-
"of sockets. To use sockets directly instead of URLFetch see "
|
114 |
-
"https://urllib3.readthedocs.io/en/1.26.x/reference/urllib3.contrib.html.",
|
115 |
-
AppEnginePlatformWarning,
|
116 |
-
)
|
117 |
-
|
118 |
-
RequestMethods.__init__(self, headers)
|
119 |
-
self.validate_certificate = validate_certificate
|
120 |
-
self.urlfetch_retries = urlfetch_retries
|
121 |
-
|
122 |
-
self.retries = retries or Retry.DEFAULT
|
123 |
-
|
124 |
-
def __enter__(self):
|
125 |
-
return self
|
126 |
-
|
127 |
-
def __exit__(self, exc_type, exc_val, exc_tb):
|
128 |
-
# Return False to re-raise any potential exceptions
|
129 |
-
return False
|
130 |
-
|
131 |
-
def urlopen(
|
132 |
-
self,
|
133 |
-
method,
|
134 |
-
url,
|
135 |
-
body=None,
|
136 |
-
headers=None,
|
137 |
-
retries=None,
|
138 |
-
redirect=True,
|
139 |
-
timeout=Timeout.DEFAULT_TIMEOUT,
|
140 |
-
**response_kw
|
141 |
-
):
|
142 |
-
|
143 |
-
retries = self._get_retries(retries, redirect)
|
144 |
-
|
145 |
-
try:
|
146 |
-
follow_redirects = redirect and retries.redirect != 0 and retries.total
|
147 |
-
response = urlfetch.fetch(
|
148 |
-
url,
|
149 |
-
payload=body,
|
150 |
-
method=method,
|
151 |
-
headers=headers or {},
|
152 |
-
allow_truncated=False,
|
153 |
-
follow_redirects=self.urlfetch_retries and follow_redirects,
|
154 |
-
deadline=self._get_absolute_timeout(timeout),
|
155 |
-
validate_certificate=self.validate_certificate,
|
156 |
-
)
|
157 |
-
except urlfetch.DeadlineExceededError as e:
|
158 |
-
raise TimeoutError(self, e)
|
159 |
-
|
160 |
-
except urlfetch.InvalidURLError as e:
|
161 |
-
if "too large" in str(e):
|
162 |
-
raise AppEnginePlatformError(
|
163 |
-
"URLFetch request too large, URLFetch only "
|
164 |
-
"supports requests up to 10mb in size.",
|
165 |
-
e,
|
166 |
-
)
|
167 |
-
raise ProtocolError(e)
|
168 |
-
|
169 |
-
except urlfetch.DownloadError as e:
|
170 |
-
if "Too many redirects" in str(e):
|
171 |
-
raise MaxRetryError(self, url, reason=e)
|
172 |
-
raise ProtocolError(e)
|
173 |
-
|
174 |
-
except urlfetch.ResponseTooLargeError as e:
|
175 |
-
raise AppEnginePlatformError(
|
176 |
-
"URLFetch response too large, URLFetch only supports"
|
177 |
-
"responses up to 32mb in size.",
|
178 |
-
e,
|
179 |
-
)
|
180 |
-
|
181 |
-
except urlfetch.SSLCertificateError as e:
|
182 |
-
raise SSLError(e)
|
183 |
-
|
184 |
-
except urlfetch.InvalidMethodError as e:
|
185 |
-
raise AppEnginePlatformError(
|
186 |
-
"URLFetch does not support method: %s" % method, e
|
187 |
-
)
|
188 |
-
|
189 |
-
http_response = self._urlfetch_response_to_http_response(
|
190 |
-
response, retries=retries, **response_kw
|
191 |
-
)
|
192 |
-
|
193 |
-
# Handle redirect?
|
194 |
-
redirect_location = redirect and http_response.get_redirect_location()
|
195 |
-
if redirect_location:
|
196 |
-
# Check for redirect response
|
197 |
-
if self.urlfetch_retries and retries.raise_on_redirect:
|
198 |
-
raise MaxRetryError(self, url, "too many redirects")
|
199 |
-
else:
|
200 |
-
if http_response.status == 303:
|
201 |
-
method = "GET"
|
202 |
-
|
203 |
-
try:
|
204 |
-
retries = retries.increment(
|
205 |
-
method, url, response=http_response, _pool=self
|
206 |
-
)
|
207 |
-
except MaxRetryError:
|
208 |
-
if retries.raise_on_redirect:
|
209 |
-
raise MaxRetryError(self, url, "too many redirects")
|
210 |
-
return http_response
|
211 |
-
|
212 |
-
retries.sleep_for_retry(http_response)
|
213 |
-
log.debug("Redirecting %s -> %s", url, redirect_location)
|
214 |
-
redirect_url = urljoin(url, redirect_location)
|
215 |
-
return self.urlopen(
|
216 |
-
method,
|
217 |
-
redirect_url,
|
218 |
-
body,
|
219 |
-
headers,
|
220 |
-
retries=retries,
|
221 |
-
redirect=redirect,
|
222 |
-
timeout=timeout,
|
223 |
-
**response_kw
|
224 |
-
)
|
225 |
-
|
226 |
-
# Check if we should retry the HTTP response.
|
227 |
-
has_retry_after = bool(http_response.headers.get("Retry-After"))
|
228 |
-
if retries.is_retry(method, http_response.status, has_retry_after):
|
229 |
-
retries = retries.increment(method, url, response=http_response, _pool=self)
|
230 |
-
log.debug("Retry: %s", url)
|
231 |
-
retries.sleep(http_response)
|
232 |
-
return self.urlopen(
|
233 |
-
method,
|
234 |
-
url,
|
235 |
-
body=body,
|
236 |
-
headers=headers,
|
237 |
-
retries=retries,
|
238 |
-
redirect=redirect,
|
239 |
-
timeout=timeout,
|
240 |
-
**response_kw
|
241 |
-
)
|
242 |
-
|
243 |
-
return http_response
|
244 |
-
|
245 |
-
def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
|
246 |
-
|
247 |
-
if is_prod_appengine():
|
248 |
-
# Production GAE handles deflate encoding automatically, but does
|
249 |
-
# not remove the encoding header.
|
250 |
-
content_encoding = urlfetch_resp.headers.get("content-encoding")
|
251 |
-
|
252 |
-
if content_encoding == "deflate":
|
253 |
-
del urlfetch_resp.headers["content-encoding"]
|
254 |
-
|
255 |
-
transfer_encoding = urlfetch_resp.headers.get("transfer-encoding")
|
256 |
-
# We have a full response's content,
|
257 |
-
# so let's make sure we don't report ourselves as chunked data.
|
258 |
-
if transfer_encoding == "chunked":
|
259 |
-
encodings = transfer_encoding.split(",")
|
260 |
-
encodings.remove("chunked")
|
261 |
-
urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings)
|
262 |
-
|
263 |
-
original_response = HTTPResponse(
|
264 |
-
# In order for decoding to work, we must present the content as
|
265 |
-
# a file-like object.
|
266 |
-
body=io.BytesIO(urlfetch_resp.content),
|
267 |
-
msg=urlfetch_resp.header_msg,
|
268 |
-
headers=urlfetch_resp.headers,
|
269 |
-
status=urlfetch_resp.status_code,
|
270 |
-
**response_kw
|
271 |
-
)
|
272 |
-
|
273 |
-
return HTTPResponse(
|
274 |
-
body=io.BytesIO(urlfetch_resp.content),
|
275 |
-
headers=urlfetch_resp.headers,
|
276 |
-
status=urlfetch_resp.status_code,
|
277 |
-
original_response=original_response,
|
278 |
-
**response_kw
|
279 |
-
)
|
280 |
-
|
281 |
-
def _get_absolute_timeout(self, timeout):
|
282 |
-
if timeout is Timeout.DEFAULT_TIMEOUT:
|
283 |
-
return None # Defer to URLFetch's default.
|
284 |
-
if isinstance(timeout, Timeout):
|
285 |
-
if timeout._read is not None or timeout._connect is not None:
|
286 |
-
warnings.warn(
|
287 |
-
"URLFetch does not support granular timeout settings, "
|
288 |
-
"reverting to total or default URLFetch timeout.",
|
289 |
-
AppEnginePlatformWarning,
|
290 |
-
)
|
291 |
-
return timeout.total
|
292 |
-
return timeout
|
293 |
-
|
294 |
-
def _get_retries(self, retries, redirect):
|
295 |
-
if not isinstance(retries, Retry):
|
296 |
-
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
|
297 |
-
|
298 |
-
if retries.connect or retries.read or retries.redirect:
|
299 |
-
warnings.warn(
|
300 |
-
"URLFetch only supports total retries and does not "
|
301 |
-
"recognize connect, read, or redirect retry parameters.",
|
302 |
-
AppEnginePlatformWarning,
|
303 |
-
)
|
304 |
-
|
305 |
-
return retries
|
306 |
-
|
307 |
-
|
308 |
-
# Alias methods from _appengine_environ to maintain public API interface.
|
309 |
-
|
310 |
-
is_appengine = _appengine_environ.is_appengine
|
311 |
-
is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
|
312 |
-
is_local_appengine = _appengine_environ.is_local_appengine
|
313 |
-
is_prod_appengine = _appengine_environ.is_prod_appengine
|
314 |
-
is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BillBojangeles2000/bart-large-cnn-samsum/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/philschmid/bart-large-cnn-samsum").launch()
|
|
|
|
|
|
|
|
spaces/Blackroot/Fancy-Audiogen/audio.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import os, re, json, sys
|
3 |
-
import torch, torchaudio, pathlib
|
4 |
-
from audiocraft.data.audio_utils import convert_audio
|
5 |
-
|
6 |
-
def load_and_process_audio(model, duration, optional_audio, sample_rate):
|
7 |
-
if optional_audio is None:
|
8 |
-
return None
|
9 |
-
sr, optional_audio = optional_audio[0], torch.from_numpy(optional_audio[1]).to(model.device).float().t()
|
10 |
-
if optional_audio.dim() == 1:
|
11 |
-
optional_audio = optional_audio[None]
|
12 |
-
optional_audio = optional_audio[..., :int(sr * duration)]
|
13 |
-
optional_audio = convert_audio(optional_audio, sr, sr, 1)
|
14 |
-
return optional_audio
|
15 |
-
|
16 |
-
#From https://colab.research.google.com/drive/154CqogsdP-D_TfSF9S2z8-BY98GN_na4?usp=sharing#scrollTo=exKxNU_Z4i5I
|
17 |
-
#Thank you DragonForged for the link
|
18 |
-
def extend_audio(model, prompt_waveform, prompts, prompt_sr, segments=5, overlap=2):
|
19 |
-
# Calculate the number of samples corresponding to the overlap
|
20 |
-
overlap_samples = int(overlap * prompt_sr)
|
21 |
-
|
22 |
-
device = model.device
|
23 |
-
prompt_waveform = prompt_waveform.to(device)
|
24 |
-
|
25 |
-
for i in range(1, segments):
|
26 |
-
# Grab the end of the waveform
|
27 |
-
end_waveform = prompt_waveform[...,-overlap_samples:]
|
28 |
-
|
29 |
-
# Process the trimmed waveform using the model
|
30 |
-
new_audio = model.generate_continuation(end_waveform, descriptions=[prompts[i]], prompt_sample_rate=prompt_sr, progress=True)
|
31 |
-
|
32 |
-
# Cut the seed audio off the newly generated audio
|
33 |
-
new_audio = new_audio[...,overlap_samples:]
|
34 |
-
|
35 |
-
prompt_waveform = torch.cat([prompt_waveform, new_audio], dim=2)
|
36 |
-
|
37 |
-
return prompt_waveform
|
38 |
-
|
39 |
-
def predict(model, prompts, duration, melody_parameters, extension_parameters):
|
40 |
-
melody = load_and_process_audio(model, duration, **melody_parameters)
|
41 |
-
|
42 |
-
if melody is not None:
|
43 |
-
output = model.generate_with_chroma(
|
44 |
-
descriptions=[prompts[0]],
|
45 |
-
melody_wavs=melody,
|
46 |
-
melody_sample_rate=melody_parameters['sample_rate'],
|
47 |
-
progress=False
|
48 |
-
)
|
49 |
-
else:
|
50 |
-
output = model.generate(descriptions=[prompts[0]], progress=True)
|
51 |
-
|
52 |
-
sample_rate = model.sample_rate
|
53 |
-
|
54 |
-
if extension_parameters['segments'] > 1:
|
55 |
-
output_tensors = extend_audio(model, output, prompts, sample_rate, **extension_parameters).detach().cpu().float()
|
56 |
-
else:
|
57 |
-
output_tensors = output.detach().cpu().float()
|
58 |
-
|
59 |
-
return sample_rate, output_tensors
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Boadiwaa/Recipes/openai/api_resources/file.py
DELETED
@@ -1,131 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
import os
|
3 |
-
from typing import cast
|
4 |
-
|
5 |
-
import openai
|
6 |
-
from openai import api_requestor, util, error
|
7 |
-
from openai.api_resources.abstract import DeletableAPIResource, ListableAPIResource
|
8 |
-
from openai.util import ApiType
|
9 |
-
|
10 |
-
|
11 |
-
class File(ListableAPIResource, DeletableAPIResource):
|
12 |
-
OBJECT_NAME = "files"
|
13 |
-
|
14 |
-
@classmethod
|
15 |
-
def create(
|
16 |
-
cls,
|
17 |
-
file,
|
18 |
-
purpose,
|
19 |
-
model=None,
|
20 |
-
api_key=None,
|
21 |
-
api_base=None,
|
22 |
-
api_type=None,
|
23 |
-
api_version=None,
|
24 |
-
organization=None,
|
25 |
-
user_provided_filename=None,
|
26 |
-
):
|
27 |
-
if purpose != "search" and model is not None:
|
28 |
-
raise ValueError("'model' is only meaningful if 'purpose' is 'search'")
|
29 |
-
requestor = api_requestor.APIRequestor(
|
30 |
-
api_key,
|
31 |
-
api_base=api_base or openai.api_base,
|
32 |
-
api_type=api_type,
|
33 |
-
api_version=api_version,
|
34 |
-
organization=organization,
|
35 |
-
)
|
36 |
-
typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
|
37 |
-
|
38 |
-
if typed_api_type == ApiType.AZURE:
|
39 |
-
base = cls.class_url()
|
40 |
-
url = "/%s%s?api-version=%s" % (cls.azure_api_prefix, base, api_version)
|
41 |
-
elif typed_api_type == ApiType.OPEN_AI:
|
42 |
-
url = cls.class_url()
|
43 |
-
else:
|
44 |
-
raise error.InvalidAPIType('Unsupported API type %s' % api_type)
|
45 |
-
|
46 |
-
# Set the filename on 'purpose' and 'model' to None so they are
|
47 |
-
# interpreted as form data.
|
48 |
-
files = [("purpose", (None, purpose))]
|
49 |
-
if model is not None:
|
50 |
-
files.append(("model", (None, model)))
|
51 |
-
if user_provided_filename is not None:
|
52 |
-
files.append(("file", (user_provided_filename, file, 'application/octet-stream')))
|
53 |
-
else:
|
54 |
-
files.append(("file", ("file", file, 'application/octet-stream')))
|
55 |
-
response, _, api_key = requestor.request("post", url, files=files)
|
56 |
-
return util.convert_to_openai_object(
|
57 |
-
response, api_key, api_version, organization
|
58 |
-
)
|
59 |
-
|
60 |
-
@classmethod
|
61 |
-
def download(
|
62 |
-
cls,
|
63 |
-
id,
|
64 |
-
api_key=None,
|
65 |
-
api_base=None,
|
66 |
-
api_type=None,
|
67 |
-
api_version=None,
|
68 |
-
organization=None
|
69 |
-
):
|
70 |
-
requestor = api_requestor.APIRequestor(
|
71 |
-
api_key,
|
72 |
-
api_base=api_base or openai.api_base,
|
73 |
-
api_type=api_type,
|
74 |
-
api_version=api_version,
|
75 |
-
organization=organization,
|
76 |
-
)
|
77 |
-
typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
|
78 |
-
|
79 |
-
if typed_api_type == ApiType.AZURE:
|
80 |
-
base = cls.class_url()
|
81 |
-
url = "/%s%s/%s/content?api-version=%s" % (cls.azure_api_prefix, base, id, api_version)
|
82 |
-
elif typed_api_type == ApiType.OPEN_AI:
|
83 |
-
url = f"{cls.class_url()}/{id}/content"
|
84 |
-
else:
|
85 |
-
raise error.InvalidAPIType('Unsupported API type %s' % api_type)
|
86 |
-
|
87 |
-
result = requestor.request_raw("get", url)
|
88 |
-
if not 200 <= result.status_code < 300:
|
89 |
-
raise requestor.handle_error_response(
|
90 |
-
result.content,
|
91 |
-
result.status_code,
|
92 |
-
json.loads(cast(bytes, result.content)),
|
93 |
-
result.headers,
|
94 |
-
stream_error=False,
|
95 |
-
)
|
96 |
-
return result.content
|
97 |
-
|
98 |
-
@classmethod
|
99 |
-
def find_matching_files(
|
100 |
-
cls,
|
101 |
-
name,
|
102 |
-
bytes,
|
103 |
-
purpose,
|
104 |
-
api_key=None,
|
105 |
-
api_base=None,
|
106 |
-
api_type=None,
|
107 |
-
api_version=None,
|
108 |
-
organization=None,
|
109 |
-
):
|
110 |
-
"""Find already uploaded files with the same name, size, and purpose."""
|
111 |
-
all_files = cls.list(
|
112 |
-
api_key=api_key,
|
113 |
-
api_base=api_base or openai.api_base,
|
114 |
-
api_type=api_type,
|
115 |
-
api_version=api_version,
|
116 |
-
organization=organization,
|
117 |
-
).get("data", [])
|
118 |
-
matching_files = []
|
119 |
-
basename = os.path.basename(name)
|
120 |
-
for f in all_files:
|
121 |
-
if f["purpose"] != purpose:
|
122 |
-
continue
|
123 |
-
file_basename = os.path.basename(f["filename"])
|
124 |
-
if file_basename != basename:
|
125 |
-
continue
|
126 |
-
if "bytes" in f and f["bytes"] != bytes:
|
127 |
-
continue
|
128 |
-
if "size" in f and int(f["size"]) != bytes:
|
129 |
-
continue
|
130 |
-
matching_files.append(f)
|
131 |
-
return matching_files
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/utils.py
DELETED
@@ -1,100 +0,0 @@
|
|
1 |
-
from __future__ import print_function
|
2 |
-
|
3 |
-
import errno
|
4 |
-
import os
|
5 |
-
import numpy as np
|
6 |
-
from PIL import Image
|
7 |
-
import torch
|
8 |
-
import torch.nn as nn
|
9 |
-
|
10 |
-
|
11 |
-
EPS = 1e-7
|
12 |
-
|
13 |
-
|
14 |
-
def assert_eq(real, expected):
|
15 |
-
assert real == expected, '%s (true) vs %s (expected)' % (real, expected)
|
16 |
-
|
17 |
-
|
18 |
-
def assert_array_eq(real, expected):
|
19 |
-
assert (np.abs(real-expected) < EPS).all(), \
|
20 |
-
'%s (true) vs %s (expected)' % (real, expected)
|
21 |
-
|
22 |
-
|
23 |
-
def load_folder(folder, suffix):
|
24 |
-
imgs = []
|
25 |
-
for f in sorted(os.listdir(folder)):
|
26 |
-
if f.endswith(suffix):
|
27 |
-
imgs.append(os.path.join(folder, f))
|
28 |
-
return imgs
|
29 |
-
|
30 |
-
|
31 |
-
def load_imageid(folder):
|
32 |
-
images = load_folder(folder, 'jpg')
|
33 |
-
img_ids = set()
|
34 |
-
for img in images:
|
35 |
-
img_id = int(img.split('/')[-1].split('.')[0].split('_')[-1])
|
36 |
-
img_ids.add(img_id)
|
37 |
-
return img_ids
|
38 |
-
|
39 |
-
|
40 |
-
def pil_loader(path):
|
41 |
-
with open(path, 'rb') as f:
|
42 |
-
with Image.open(f) as img:
|
43 |
-
return img.convert('RGB')
|
44 |
-
|
45 |
-
|
46 |
-
def weights_init(m):
|
47 |
-
"""custom weights initialization."""
|
48 |
-
cname = m.__class__
|
49 |
-
if cname == nn.Linear or cname == nn.Conv2d or cname == nn.ConvTranspose2d:
|
50 |
-
m.weight.data.normal_(0.0, 0.02)
|
51 |
-
elif cname == nn.BatchNorm2d:
|
52 |
-
m.weight.data.normal_(1.0, 0.02)
|
53 |
-
m.bias.data.fill_(0)
|
54 |
-
else:
|
55 |
-
print('%s is not initialized.' % cname)
|
56 |
-
|
57 |
-
|
58 |
-
def init_net(net, net_file):
|
59 |
-
if net_file:
|
60 |
-
net.load_state_dict(torch.load(net_file))
|
61 |
-
else:
|
62 |
-
net.apply(weights_init)
|
63 |
-
|
64 |
-
|
65 |
-
def create_dir(path):
|
66 |
-
if not os.path.exists(path):
|
67 |
-
try:
|
68 |
-
os.makedirs(path)
|
69 |
-
except OSError as exc:
|
70 |
-
if exc.errno != errno.EEXIST:
|
71 |
-
raise
|
72 |
-
|
73 |
-
|
74 |
-
class Logger(object):
|
75 |
-
def __init__(self, output_name):
|
76 |
-
dirname = os.path.dirname(output_name)
|
77 |
-
if not os.path.exists(dirname):
|
78 |
-
os.mkdir(dirname)
|
79 |
-
|
80 |
-
self.log_file = open(output_name, 'w')
|
81 |
-
self.infos = {}
|
82 |
-
|
83 |
-
def append(self, key, val):
|
84 |
-
vals = self.infos.setdefault(key, [])
|
85 |
-
vals.append(val)
|
86 |
-
|
87 |
-
def log(self, extra_msg=''):
|
88 |
-
msgs = [extra_msg]
|
89 |
-
for key, vals in self.infos.iteritems():
|
90 |
-
msgs.append('%s %.6f' % (key, np.mean(vals)))
|
91 |
-
msg = '\n'.join(msgs)
|
92 |
-
self.log_file.write(msg + '\n')
|
93 |
-
self.log_file.flush()
|
94 |
-
self.infos = {}
|
95 |
-
return msg
|
96 |
-
|
97 |
-
def write(self, msg):
|
98 |
-
self.log_file.write(msg + '\n')
|
99 |
-
self.log_file.flush()
|
100 |
-
print(msg)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp
DELETED
@@ -1,43 +0,0 @@
|
|
1 |
-
/*!
|
2 |
-
**************************************************************************************************
|
3 |
-
* Deformable DETR
|
4 |
-
* Copyright (c) 2020 SenseTime. All Rights Reserved.
|
5 |
-
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
6 |
-
**************************************************************************************************
|
7 |
-
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
|
8 |
-
**************************************************************************************************
|
9 |
-
*/
|
10 |
-
|
11 |
-
#include <vector>
|
12 |
-
|
13 |
-
#include <ATen/ATen.h>
|
14 |
-
#include <ATen/cuda/CUDAContext.h>
|
15 |
-
|
16 |
-
namespace groundingdino {
|
17 |
-
|
18 |
-
at::Tensor
|
19 |
-
ms_deform_attn_cpu_forward(
|
20 |
-
const at::Tensor &value,
|
21 |
-
const at::Tensor &spatial_shapes,
|
22 |
-
const at::Tensor &level_start_index,
|
23 |
-
const at::Tensor &sampling_loc,
|
24 |
-
const at::Tensor &attn_weight,
|
25 |
-
const int im2col_step)
|
26 |
-
{
|
27 |
-
AT_ERROR("Not implement on cpu");
|
28 |
-
}
|
29 |
-
|
30 |
-
std::vector<at::Tensor>
|
31 |
-
ms_deform_attn_cpu_backward(
|
32 |
-
const at::Tensor &value,
|
33 |
-
const at::Tensor &spatial_shapes,
|
34 |
-
const at::Tensor &level_start_index,
|
35 |
-
const at::Tensor &sampling_loc,
|
36 |
-
const at::Tensor &attn_weight,
|
37 |
-
const at::Tensor &grad_output,
|
38 |
-
const int im2col_step)
|
39 |
-
{
|
40 |
-
AT_ERROR("Not implement on cpu");
|
41 |
-
}
|
42 |
-
|
43 |
-
} // namespace groundingdino
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|