Commit
·
802be5e
1
Parent(s):
ebac6d6
Update parquet files (step 122 of 249)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/gpt4free/g4f/.v1/testing/forefront_test.py +0 -9
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autocad 2022 Repair.md +0 -36
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chess Opening Trainer Keygen Crack Discover the Secrets of Chess Grandmasters with This App.md +0 -120
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Stata 14 For Mac BETTER.md +0 -117
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit Phantompdf Free Download Full Version NEW.md +0 -28
- spaces/1gistliPinn/ChatGPT4/Examples/Cadpower 2008 64bit.md +0 -7
- spaces/1gistliPinn/ChatGPT4/Examples/Euro Truck Simulator 1 Pc Crack [UPD].md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Cricket League MOD APK and Become the Ultimate Cricket Champion (Unlimited Gems and Coins).md +0 -84
- spaces/1phancelerku/anime-remove-background/Burn Belly Fat and Sculpt Your 6 Pack Abs with This Amazing APK.md +0 -89
- spaces/1phancelerku/anime-remove-background/Download Topaz AI and Learn How to Use It to Improve Your Image Quality in Minutes.md +0 -129
- spaces/1phancelerku/anime-remove-background/Everskies Oyna A Fun and Creative Way to Express Yourself Online.md +0 -106
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_config.py +0 -16
- spaces/4eJIoBek/Stable_Diffusion_1.4_openvino/stable_diffusion_engine.py +0 -212
- spaces/7thHeaven/GPT2WordPress/constraints.md +0 -8
- spaces/AIConsultant/MusicGen/CONTRIBUTING.md +0 -35
- spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/fused_act.py +0 -85
- spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/tts_utils.py +0 -54
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/image_degradation/bsrgan_light.py +0 -650
- spaces/AIZ2H/05-SOTA-Question-Answer-From-TextFileContext/app.py +0 -20
- spaces/Aadi1149/Arkenbrien-text-to-image-Arkenbrien/app.py +0 -3
- spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/6.js +0 -0
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/methods/ConfigurationMethods.js +0 -107
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/GetThumbAlignPoint.js +0 -23
- spaces/AlawnCN/webui-docker/oh-no.py +0 -14
- spaces/Ali-Omrani/CCR/README.md +0 -12
- spaces/Alpaca233/SadTalker/src/facerender/sync_batchnorm/batchnorm.py +0 -315
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/tutorials/tutorial_overview.md +0 -23
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_euler_ancestral.py +0 -118
- spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py +0 -39
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/pisa_retinanet_head.py +0 -154
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes.py +0 -11
- spaces/AnnonSubmission/xai-cl/utils.py +0 -101
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/vit.py +0 -491
- spaces/Apex-X/GODROOP/roop/predictor.py +0 -22
- spaces/ArcanAlt/arcanDream/server.js +0 -32
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/scope.py +0 -86
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/build.py +0 -146
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/__init__.py +0 -12
- spaces/Benson/text-generation/Examples/Apk Mod De Da Para Android 11.md +0 -53
- spaces/Benson/text-generation/Examples/Blacknoise Reste Toi Mp3 Download.md +0 -109
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/langhebrewmodel.py +0 -0
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/__about__.py +0 -26
- spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/retry.py +0 -620
- spaces/Billius/VizLib-TopLargeHospitalsNewJersey-04-07-2023/app.py +0 -58
- spaces/CAMP-ViL/Xplainer/README.md +0 -12
- spaces/CVPR/LIVE/pybind11/tests/test_docstring_options.py +0 -39
- spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/swap_ranges.h +0 -23
- spaces/CVPR/TokenCut/app_backup.py +0 -43
- spaces/CVPR/ml-talking-face/toxicity_estimator/__init__.py +0 -1
- spaces/CVPR/regionclip-demo/detectron2/projects/__init__.py +0 -31
spaces/101-5/gpt4free/g4f/.v1/testing/forefront_test.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
from gpt4free import forefront
|
2 |
-
|
3 |
-
# create an account
|
4 |
-
token = forefront.Account.create(logging=True)
|
5 |
-
print(token)
|
6 |
-
|
7 |
-
# get a response
|
8 |
-
for response in forefront.StreamingCompletion.create(token=token, prompt='hello world', model='gpt-4'):
|
9 |
-
print(response.text, end='')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autocad 2022 Repair.md
DELETED
@@ -1,36 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Repair AutoCAD 2022 - Easy and Effective Methods</h1>
|
3 |
-
<p>AutoCAD 2022 is a powerful and versatile software that allows you to create and edit 2D and 3D designs. However, sometimes AutoCAD 2022 may encounter problems that prevent it from working properly. These problems can be caused by various factors, such as corrupted files, missing components, incompatible drivers, or malware infections. If you are facing any issues with AutoCAD 2022, don't worry. In this article, we will show you how to repair AutoCAD 2022 using some easy and effective methods.</p>
|
4 |
-
<h2>Method 1: Use the Repair Tool in the Control Panel</h2>
|
5 |
-
<p>One of the simplest ways to repair AutoCAD 2022 is to use the built-in repair tool in the Control Panel. This tool can fix common errors and restore the default settings of AutoCAD 2022. To use this method, follow these steps:</p>
|
6 |
-
<h2>autocad 2022 repair</h2><br /><p><b><b>Download File</b> --->>> <a href="https://byltly.com/2uKvUh">https://byltly.com/2uKvUh</a></b></p><br /><br />
|
7 |
-
<ol>
|
8 |
-
<li>Close any running instances of AutoCAD 2022.</li>
|
9 |
-
<li>Go to the Start menu and type "Control Panel". Click on the Control Panel app that appears.</li>
|
10 |
-
<li>In the Control Panel, click on "Programs and Features". This will show you a list of all the installed programs on your PC.</li>
|
11 |
-
<li>Find and select AutoCAD 2022 from the list. Then click on the "Uninstall/Change" button above the list.</li>
|
12 |
-
<li>A window will pop up with two options: "Repair" and "Uninstall". Choose the "Repair" option and click on "Continue".</li>
|
13 |
-
<li>Follow the instructions on the screen to complete the repair process. This may take some time depending on the size and condition of your AutoCAD 2022 installation.</li>
|
14 |
-
<li>When the repair is finished, restart your PC and launch AutoCAD 2022. Check if the problem is resolved.</li>
|
15 |
-
</ol>
|
16 |
-
<h2>Method 2: Reinstall AutoCAD 2022</h2>
|
17 |
-
<p>If the repair tool does not work or if you want to start fresh with AutoCAD 2022, you can try reinstalling it. This will remove all the existing files and settings of AutoCAD 2022 and install a new copy. To do this, follow these steps:</p>
|
18 |
-
<ol>
|
19 |
-
<li>Close any running instances of AutoCAD 2022.</li>
|
20 |
-
<li>Go to the Start menu and type "Control Panel". Click on the Control Panel app that appears.</li>
|
21 |
-
<li>In the Control Panel, click on "Programs and Features". This will show you a list of all the installed programs on your PC.</li>
|
22 |
-
<li>Find and select AutoCAD 2022 from the list. Then click on the "Uninstall/Change" button above the list.</li>
|
23 |
-
<li>A window will pop up with two options: "Repair" and "Uninstall". Choose the "Uninstall" option and click on "Continue".</li>
|
24 |
-
<li>Follow the instructions on the screen to complete the uninstallation process. This may take some time depending on the size and condition of your AutoCAD 2022 installation.</li>
|
25 |
-
<li>When the uninstallation is finished, restart your PC and go to <a href="https://www.autodesk.com/products/autocad/free-trial">https://www.autodesk.com/products/autocad/free-trial</a>. Download and install a new copy of AutoCAD 2022 following the instructions on the website.</li>
|
26 |
-
<li>Launch AutoCAD 2022 and activate it with your license key. Check if the problem is resolved.</li>
|
27 |
-
</ol>
|
28 |
-
<h2>Method 3: Update Your Drivers</h2>
|
29 |
-
<p>Sometimes, outdated or incompatible drivers can cause problems with AutoCAD 2022. Drivers are software components that enable your PC to communicate with your hardware devices, such as your graphics card, sound card, or printer. To ensure that AutoCAD 2022 runs smoothly, you need to update your drivers regularly. To do this, follow these steps:</p>
|
30 |
-
<ol>
|
31 |
-
<li>Go to the Start menu and type "Device Manager". Click on the Device Manager app that appears.</li>
|
32 |
-
<li>In the Device Manager, expand the categories of devices that you want to update. For example, if you want to update your graphics card driver, expand the "Display adapters" category.</li>
|
33 |
-
<li>Right-click on the device that you want to update and choose "Update driver".</li>
|
34 |
-
<li>A window will pop up with two options: "Search automatically for updated driver software" and "Browse my computer for driver software".</p> ddb901b051<br />
|
35 |
-
<br />
|
36 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chess Opening Trainer Keygen Crack Discover the Secrets of Chess Grandmasters with This App.md
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Chess Opening Trainer Keygen Crack: How to Download and Use It</h1>
|
3 |
-
<p>If you are a chess enthusiast who wants to improve your skills and knowledge of chess openings, you might be interested in Chess Opening Trainer, a software that helps you learn and practice chess openings. However, this software is not free and requires a serial key for activation. In this article, we will show you how to download and use Chess Opening Trainer keygen crack, a tool that generates serial keys for software activation. We will also explain what Chess Opening Trainer is, what keygen crack is, and what are the risks and drawbacks of using it.</p>
|
4 |
-
<h2>chess opening trainer keygen crack</h2><br /><p><b><b>Download File</b> --->>> <a href="https://byltly.com/2uKxMS">https://byltly.com/2uKxMS</a></b></p><br /><br />
|
5 |
-
<h2>What is Chess Opening Trainer?</h2>
|
6 |
-
<p>Chess Opening Trainer is a software that helps you learn and practice chess openings. It allows you to create your own opening repertoire, test your knowledge with quizzes and puzzles, analyze your games with a powerful engine, and play against the computer or online opponents. Chess Opening Trainer also provides you with a database of over 100,000 chess games from grandmasters and experts, as well as a collection of opening books and videos.</p>
|
7 |
-
<h3>Features and benefits of Chess Opening Trainer</h3>
|
8 |
-
<p>Some of the features and benefits of Chess Opening Trainer are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>It helps you memorize chess openings faster and easier by using spaced repetition and flashcards.</li>
|
11 |
-
<li>It lets you customize your opening repertoire according to your style, level, and preferences.</li>
|
12 |
-
<li>It gives you feedback on your strengths and weaknesses in chess openings.</li>
|
13 |
-
<li>It improves your tactical skills by providing you with challenging quizzes and puzzles.</li>
|
14 |
-
<li>It enhances your strategic understanding by showing you the plans and ideas behind each opening.</li>
|
15 |
-
<li>It allows you to compare your opening repertoire with those of top players and learn from their games.</li>
|
16 |
-
<li>It supports various formats such as PGN, FEN, EPD, CBH, CTG, etc.</li>
|
17 |
-
<li>It works offline and online, on Windows, Mac, Linux, Android, iOS, etc.</li>
|
18 |
-
</ul>
|
19 |
-
<h2>What is keygen crack?</h2>
|
20 |
-
<p>Keygen crack is a tool that generates serial keys for software activation. It is usually used by people who want to use paid software for free without purchasing a license. Keygen crack works by exploiting the algorithm or code that the software uses to verify the validity of the serial key. By using keygen crack, you can bypass the activation process and use the software without any restrictions.</p>
|
21 |
-
<p>chess opening trainer activation code generator<br />
|
22 |
-
chess opening trainer license key free download<br />
|
23 |
-
chess opening trainer full version cracked software<br />
|
24 |
-
chess opening trainer serial number online<br />
|
25 |
-
chess opening trainer registration key hack<br />
|
26 |
-
chess opening trainer product key finder<br />
|
27 |
-
chess opening trainer crack file torrent<br />
|
28 |
-
chess opening trainer keygen software download<br />
|
29 |
-
chess opening trainer patch file zip<br />
|
30 |
-
chess opening trainer unlock code online<br />
|
31 |
-
chess opening trainer activation key crack<br />
|
32 |
-
chess opening trainer license code free<br />
|
33 |
-
chess opening trainer full crack download<br />
|
34 |
-
chess opening trainer serial key generator<br />
|
35 |
-
chess opening trainer registration code hack<br />
|
36 |
-
chess opening trainer product code finder<br />
|
37 |
-
chess opening trainer crack file download<br />
|
38 |
-
chess opening trainer keygen download<br />
|
39 |
-
chess opening trainer patch file download<br />
|
40 |
-
chess opening trainer unlock key online<br />
|
41 |
-
chess opening trainer activation code free<br />
|
42 |
-
chess opening trainer license key crack<br />
|
43 |
-
chess opening trainer full version cracked download<br />
|
44 |
-
chess opening trainer serial number online generator<br />
|
45 |
-
chess opening trainer registration key free<br />
|
46 |
-
chess opening trainer product key hack<br />
|
47 |
-
chess opening trainer crack file free download<br />
|
48 |
-
chess opening trainer keygen software free download<br />
|
49 |
-
chess opening trainer patch file free download<br />
|
50 |
-
chess opening trainer unlock code generator<br />
|
51 |
-
chess opening trainer activation key free download<br />
|
52 |
-
chess opening trainer license code crack<br />
|
53 |
-
chess opening trainer full crack software download<br />
|
54 |
-
chess opening trainer serial key online generator<br />
|
55 |
-
chess opening trainer registration code free download<br />
|
56 |
-
chess opening trainer product code hack tool<br />
|
57 |
-
chess opening trainer crack file torrent download<br />
|
58 |
-
chess opening trainer keygen software torrent download<br />
|
59 |
-
chess opening trainer patch file torrent download<br />
|
60 |
-
chess opening trainer unlock key generator online<br />
|
61 |
-
chess opening trainer activation code torrent download<br />
|
62 |
-
chess opening trainer license key free online<br />
|
63 |
-
chess opening trainer full version cracked torrent download<br />
|
64 |
-
chess opening trainer serial number hack tool online<br />
|
65 |
-
chess opening trainer registration key torrent download<br />
|
66 |
-
chess opening trainer product key free online generator<br />
|
67 |
-
chess opening trainer crack file direct download link<br />
|
68 |
-
chess opening trainer keygen software direct download link<br />
|
69 |
-
chess opening trainer patch file direct download link</p>
|
70 |
-
<h3>Risks and drawbacks of using keygen crack</h3>
|
71 |
-
<p>However, using keygen crack is not recommended for several reasons:</p>
|
72 |
-
<ul>
|
73 |
-
<li>It is illegal and unethical. By using keygen crack, you are violating the intellectual property rights of the software developers and distributors. You are also depriving them of their income and incentive to create more quality products.</li>
|
74 |
-
<li>It is unsafe and risky. By downloading keygen crack from unknown sources, you are exposing your computer to malware, viruses, spyware, ransomware, etc. These malicious programs can damage your system, steal your data, compromise your privacy, etc.</li>
|
75 |
-
<li>It is unreliable and unstable. By using keygen crack, you are not guaranteed that the software will work properly or at all. You may encounter errors, bugs, crashes, compatibility issues, etc. You may also miss out on updates, patches, support, etc.</li>
|
76 |
-
</ul>
|
77 |
-
<h2>How to download and use Chess Opening Trainer keygen crack?</h2>
|
78 |
-
<p>If you still want to download and use Chess Opening Trainer keygen crack despite the risks and drawbacks mentioned above, here are the steps you need to follow:</p>
|
79 |
-
<h3>Step 1: Find a reliable source for the keygen crack file</h3>
|
80 |
-
<p>The first step is to find a reliable source for the keygen crack file. You can search online for websites or forums that offer Chess Opening Trainer keygen crack. However, be careful not to click on suspicious links or download files from untrusted sources. You can also use antivirus software or online scanners to check if the file is safe or not.</p>
|
81 |
-
<h3>Step 2: Run the keygen crack file and generate a serial key</h3>
|
82 |
-
<p>The second step is to run the keygen crack file and generate a serial key. You may need to extract the file first if it is compressed or archived. Then, double-click on the file or right-click on it and select Run as administrator. You may see a window like this:</p>
|
83 |
-
<code><pre>
|
84 |
-
Chess Opening Trainer Keygen Crack v1.0 -------------------------------------- Enter your name: _________ Press Generate button Serial Key: _____________ Copy the serial key Press Exit button </pre></code>
|
85 |
-
<p>Enter your name or any name you want in the blank space. Then press Generate button. You will see a serial key generated for you. Copy the serial key and save it somewhere safe.</p>
|
86 |
-
<h3>Step 3: Download and install Chess Opening Trainer from the official website</h3>
|
87 |
-
<p>The third step is to download and install Chess Opening Trainer from the official website. You can go to https://chesstempo.com/opening-training/ and click on Download button. You will see a window like this:</p>
|
88 |
-
<code><pre>
|
89 |
-
Chess Opening Trainer Download ----------------------------- Choose your platform: Windows | Mac | Linux | Android | iOS </pre></code>
|
90 |
-
<p>Select your platform and follow the instructions to download and install Chess Opening Trainer on your device.</p>
|
91 |
-
<h3>Step 4: Enter the serial key and activate the software</h3>
|
92 |
-
<p>The fourth and final step is to enter the serial key and activate the software. Launch Chess Opening Trainer and go to Help menu. Select Activate License and enter the serial key that you generated earlier. Click on Activate button and you will see a message like this:</p>
|
93 |
-
<code><pre>
|
94 |
-
Chess Opening Trainer Activation ------------------------------- Your license has been activated successfully. Thank you for choosing Chess Opening Trainer. Enjoy learning chess openings! </pre></code>
|
95 |
-
<p>Congratulations! You have successfully downloaded and used Chess Opening Trainer keygen crack. You can now use all the features and benefits of Chess Opening Trainer without any limitations.</p>
|
96 |
-
<h2>Conclusion</h2>
|
97 |
-
<p>In this article, we have shown you how to download and use Chess Opening Trainer keygen crack, a tool that generates serial keys for software activation. We have also explained what Chess Opening Trainer is, what keygen crack is, and what are the risks and drawbacks of using it. We hope you have found this article useful and informative.</p>
|
98 |
-
<h2>FAQs</h2>
|
99 |
-
<p>Here are some frequently asked questions about Chess Opening Trainer keygen crack:</p>
|
100 |
-
<ol>
|
101 |
-
<li>Is Chess Opening Trainer keygen crack legal?</li>
|
102 |
-
<p>No, it is not legal. By using Chess Opening Trainer keygen crack, you are violating the intellectual property rights of the software developers and distributors. You are also depriving them of their income and incentive to create more quality products.</p>
|
103 |
-
<li>Is Chess Opening Trainer keygen crack safe?</li>
|
104 |
-
<p>No, it is not safe. By downloading Chess Opening Trainer keygen crack from unknown sources, you are exposing your computer to malware, viruses, spyware, ransomware, etc. These malicious programs can damage your system, steal your data, compromise your privacy, etc.</p>
|
105 |
-
<li>Is Chess Opening Trainer keygen crack reliable?</li>
|
106 |
-
<p>No, it is not reliable. By using Chess Opening Trainer keygen crack, you are not guaranteed that the software will work properly or at all. You may encounter errors, bugs, crashes, compatibility issues, etc. You may also miss out on updates, patches, support, etc.</p>
|
107 |
-
<li>What are some alternatives to Chess Opening Trainer keygen crack?</li>
|
108 |
-
<p>Some alternatives to Chess Opening Trainer keygen crack are:</p>
|
109 |
-
<ul>
|
110 |
-
<li>Purchasing a license for Chess Opening Trainer from the official website.</li>
|
111 |
-
<li>Using free or open source chess opening software such as SCID or Lichess.</li>
|
112 |
-
<li>Hiring a professional chess coach or joining a chess club.</li>
|
113 |
-
<li>Reading chess books or watching chess videos on chess openings.</li>
|
114 |
-
</ul>
|
115 |
-
<li>How can I contact Chess Opening Trainer support?</li>
|
116 |
-
<p>You can contact Chess Opening Trainer support by sending an email to [email protected] or by visiting their website https://chesstempo.com/contact-us/.</p>
|
117 |
-
</ol>
|
118 |
-
</p> 0a6ba089eb<br />
|
119 |
-
<br />
|
120 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Stata 14 For Mac BETTER.md
DELETED
@@ -1,117 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<table>
|
3 |
-
<tr>
|
4 |
-
<td>
|
5 |
-
<h1>Download Stata 14 for Mac: A Comprehensive Guide</h1>
|
6 |
-
<p>If you are looking for a powerful and versatile software for data analysis, statistics, and graphics, you might want to consider downloading Stata 14 for Mac. Stata 14 is one of the most popular and widely used software packages in the fields of economics, sociology, political science, biostatistics, epidemiology, and many others. It offers a range of features and benefits that can help you perform complex data manipulation, estimation, testing, forecasting, simulation, and visualization tasks with ease and accuracy.</p>
|
7 |
-
<p>In this article, we will provide you with a comprehensive guide on how to download Stata 14 for Mac. We will also explain what Stata 14 is and why you need it, what are its main features and benefits, how to install and use it on your Mac computer, and some frequently asked questions about it. By the end of this article, you will have a clear idea of whether Stata 14 is the right software for you and how to get started with it.</p>
|
8 |
-
<h2>Download Stata 14 For Mac</h2><br /><p><b><b>Download</b> ⚹ <a href="https://byltly.com/2uKx8D">https://byltly.com/2uKx8D</a></b></p><br /><br />
|
9 |
-
<h2>What is Stata 14 and why do you need it?</h2>
|
10 |
-
<p>Stata 14 is a software package that was released in April 2015 by StataCorp, a company that has been developing and distributing statistical software since 1985. Stata 14 is the latest version of Stata as of June 2021, although there have been several updates and bug fixes since then. The current update is Stata 14.2.</p>
|
11 |
-
<p>Stata 14 is a software that can handle both cross-sectional and longitudinal data, as well as panel data and multilevel data. It can also deal with both continuous and discrete variables, as well as categorical and ordinal variables. It can perform various types of analysis, such as linear and nonlinear regression, ANOVA, logistic regression, survival analysis, time series analysis, factor analysis, cluster analysis, structural equation modeling (SEM), item response theory (IRT), Bayesian analysis, power and sample size calculation, Markov-switching models, treatment effects models, multilevel survival models, fractional outcome regression models, and many more.</p>
|
12 |
-
<p>Stata 14 also has a user-friendly interface that allows you to interact with the software using either menus or commands. You can also customize your preferences and settings according to your needs. You can also create your own commands or programs using the built-in programming language of Stata. You can also access thousands of user-written commands or programs from the internet or from the official Stata Journal.</p>
|
13 |
-
<p>Stata 14 also has a powerful graphics engine that can produce high-quality graphs and charts that can be customized in various ways. You can also export your graphs to different formats such as PDF, PNG, EPS, SVG, etc. You can also integrate your graphs with other applications such as Microsoft Word or PowerPoint.</p>
|
14 |
-
<p>Stata 14 also has a comprehensive documentation that includes manuals, tutorials, examples, FAQs, glossaries, references, etc. You can also get support from the official website of StataCorp or from the online community of Stata users called St <p>ataList. You can also get training courses or webinars from StataCorp or from other authorized providers.</p>
|
15 |
-
<p>Stata 14 is a software that can help you with your data analysis needs, whether you are a student, a researcher, a teacher, a consultant, or a professional. It can help you save time and effort, improve your accuracy and reliability, enhance your presentation and communication, and expand your knowledge and skills. It can also help you collaborate with other Stata users around the world and share your insights and discoveries.</p>
|
16 |
-
<h2>Features and benefits of Stata 14</h2>
|
17 |
-
<p>Stata 14 has many features and benefits that make it a superior software for data analysis. Here are some of the most notable ones:</p>
|
18 |
-
<h3>Bayesian analysis</h3>
|
19 |
-
<p>Stata 14 introduces a new command called bayes that allows you to perform Bayesian analysis using Markov chain Monte Carlo (MCMC) methods. You can specify any likelihood function and any prior distribution for the parameters, and Stata will generate posterior samples and summaries for you. You can also use predefined models such as linear regression, logistic regression, Poisson regression, etc. You can also compare models using Bayes factors or posterior predictive checks. You can also visualize your results using trace plots, density plots, interval plots, etc.</p>
|
20 |
-
<p></p>
|
21 |
-
<h3>IRT (item response theory)</h3>
|
22 |
-
<p>Stata 14 also introduces a new command called irt that allows you to perform item response theory (IRT) analysis using maximum likelihood estimation (MLE) methods. You can fit various IRT models such as Rasch model, one-parameter logistic model (1PL), two-parameter logistic model (2PL), three-parameter logistic model (3PL), graded response model (GRM), partial credit model (PCM), etc. You can also test the assumptions of IRT models such as unidimensionality, local independence, monotonicity, etc. You can also assess the reliability and validity of your instruments using Cronbach's alpha, test information function (TIF), item information function (IIF), etc.</p>
|
23 |
-
<h3>Unicode</h3>
|
24 |
-
<p>Stata 14 supports Unicode encoding, which means that you can use any character set or language in your data, commands, output, graphs, etc. You can also import and export data files that use Unicode encoding. You can also use Unicode characters in your variable names, labels, values, etc. This feature makes Stata 14 more accessible and compatible with different cultures and languages.</p>
|
25 |
-
<h3>Integration with Excel</h3>
|
26 |
-
<p>Stata 14 has improved its integration with Excel, which means that you can easily import and export data between Stata and Excel. You can also use the new command called import excel to import data from Excel files directly into Stata without saving them as CSV files first. You can also use the new command called export excel to export data from Stata to Excel files with various options such as sheet name, cell range, variable names, labels, formats, etc.</p>
|
27 |
-
<h3>Treatment effects</h3>
|
28 |
-
<p>Stata 14 has expanded its treatment effects capabilities by adding new commands such as teffects ipwra for inverse probability weighting with regression adjustment (IPWRA), teffects ipw for inverse probability weighting (IPW), teffects psmatch for propensity score matching (PSM), teffects nnmatch for nearest neighbor matching (NNM), teffects overlap for overlap weights (OW), teffects ra for regression adjustment (RA), teffects endogenous for endogenous treatment effects models (ETE), etc. These commands allow you to estimate the causal effects of treatments or interventions on outcomes using various methods that account for selection bias or confounding factors.</p>
|
29 |
-
<h3>Multilevel survival models</h3>
|
30 |
-
<p>Stata 14 has added new commands such as mestreg for multilevel survival models with random effects at different levels of hierarchy. You can specify various types of random effects such as intercepts, slopes, frailties, etc. You can also specify various types of survival distributions such as exponential, Weibull, lognormal, log-logistic, gamma, Gompertz , etc. You can also test various hypotheses and assumptions using likelihood ratio tests, Wald tests, Schoenfeld residuals, etc.</p>
|
31 |
-
<h3>SEM (structural equation modeling)</h3>
|
32 |
-
<p>Stata 14 has improved its SEM capabilities by adding new features such as latent class analysis (LCA), latent transition analysis (LTA), latent profile analysis (LPA), latent growth curve models (LGCM), multilevel SEM, generalized SEM, dynamic SEM, etc. You can also use the new command called sembuilder to create and modify SEM diagrams using a graphical user interface (GUI). You can also use the new command called estat gof to calculate various goodness-of-fit measures such as chi-square, RMSEA, CFI, TLI, SRMR, etc.</p>
|
33 |
-
<h3>Power and sample size</h3>
|
34 |
-
<p>Stata 14 has enhanced its power and sample size capabilities by adding new commands such as power twoproportions for two-sample tests of proportions, power logrank for log-rank tests of survival curves, power cox for Cox proportional hazards models, power oneway for one-way ANOVA, power repeated for repeated-measures ANOVA, power cluster for cluster randomized trials, power bootstrap for bootstrap-based power analysis, etc. These commands allow you to calculate the required sample size or the achieved power for various types of statistical tests or models.</p>
|
35 |
-
<h3>Markov-switching models</h3>
|
36 |
-
<p>Stata 14 has introduced a new command called mswitch that allows you to estimate Markov-switching models for time series data. These models allow you to capture regime changes or structural breaks in the data by allowing the parameters to switch between different states or regimes according to a Markov process. You can specify various types of Markov-switching models such as Hamilton's model, Kim's model, Goldfeld-Quandt's model, etc. You can also test for the number of regimes, the duration of regimes, the transition probabilities, etc.</p>
|
37 |
-
<h3>Panel-data survival models</h3>
|
38 |
-
<p>Stata 14 has added a new command called xtscc that allows you to estimate panel-data survival models with correlated random effects. These models allow you to account for unobserved heterogeneity and serial correlation in panel data with survival outcomes. You can specify various types of survival distributions such as exponential, Weibull, lognormal, log-logistic, gamma, Gompertz, etc. You can also test various hypotheses and assumptions using likelihood ratio tests, Wald tests, Schoenfeld residuals, etc.</p>
|
39 |
-
<h3>Fractional outcome regression</h3>
|
40 |
-
<p>Stata 14 has added a new command called fracreg that allows you to estimate fractional outcome regression models for data with fractional outcomes. These models allow you to model outcomes that are bounded between zero and one, such as proportions, rates, shares, probabilities, etc. You can specify various types of fractional outcome regression models such as beta regression, fractional logit regression, fractional probit regression, etc. You can also test various hypotheses and assumptions using likelihood ratio tests, Wald tests , score tests, etc.</p>
|
41 |
-
<h2>How to download and install Stata 14 for Mac?</h2>
|
42 |
-
<p>If you are interested in downloading and installing Stata 14 for Mac, you need to follow these steps:</p>
|
43 |
-
<h3>System requirements and compatibility</h3>
|
44 |
-
<p>Before you download and install Stata 14 for Mac, you need to make sure that your Mac computer meets the minimum system requirements and is compatible with the software. Here are the system requirements and compatibility for Stata 14 for Mac:</p>
|
45 |
-
<ul>
|
46 |
-
<li>Operating system: Mac OS X 10.7 or newer</li>
|
47 |
-
<li>Processor: 64-bit Intel processor</li>
|
48 |
-
<li>Memory: 1 GB RAM (2 GB recommended)</li>
|
49 |
-
<li>Disk space: 1 GB for Stata installation, plus additional space for datasets</li>
|
50 |
-
<li>Display: 1024 x 768 or higher resolution monitor</li>
|
51 |
-
<li>Internet connection: Required for installation and updates</li>
|
52 |
-
</ul>
|
53 |
-
<p>If your Mac computer meets these requirements and is compatible with Stata 14, you can proceed to the next step.</p>
|
54 |
-
<h3>Steps to download and install Stata 14 for Mac</h3>
|
55 |
-
<p>To download and install Stata 14 for Mac, you need to follow these steps:</p>
|
56 |
-
<ol>
|
57 |
-
<li>Go to the official website of StataCorp at <a href="">https://www.stata.com/</a></li>
|
58 |
-
<li>Click on the "Order" tab at the top of the page.</li>
|
59 |
-
<li>Select the type of license that suits your needs, such as "Stata/MP", "Stata/SE", "Stata/IC", or "Stata Small". You can also compare the features and prices of different licenses by clicking on the "Compare features" link.</li>
|
60 |
-
<li>Select the number of users and the duration of the license that you want, such as "Single-user", "Multi-user", "Perpetual", or "Annual". You can also see the total cost of your order by clicking on the "Calculate price" button.</li>
|
61 |
-
<li>Click on the "Add to cart" button to proceed to the checkout page.</li>
|
62 |
-
<li>Enter your billing and shipping information, as well as your payment method. You can pay by credit card, PayPal, wire transfer, check, or purchase order. You can also apply a discount code if you have one.</li>
|
63 |
-
<li>Review your order details and click on the "Place order" button to complete your purchase.</li>
|
64 |
-
<li>After you place your order, you will receive an email confirmation with your order number and a link to download Stata 14 for Mac. You will also receive a license code and an authorization code that you will need to activate your software.</li>
|
65 |
-
<li>Click on the link in the email to download Stata 14 for Mac. The file size is about 300 MB. Save the file to a location that you can easily access, such as your desktop or downloads folder.</li>
|
66 |
-
<li>Double-click on the downloaded file to open it. You will see a window with a Stata icon and a folder called "Stata". Drag and drop the Stata icon into the folder called "Stata". This will create a folder called "Stata14" in your applications folder.</li>
|
67 |
-
<li>Open the folder called "Stata14" and double-click on the Stata icon to launch the software. You will see a window with a welcome message and a prompt to enter your license code and authorization code. Enter the codes that you received in your email and click on the "OK" button.</li>
|
68 |
-
<li>The software will verify your codes and activate your license. You will see a window with a message that says "Congratulations! You have successfully installed Stata." Click on the "OK" button to close the window.</li>
|
69 |
-
<li>You have successfully downloaded and installed Stata 14 for Mac. You can now start using it for your data analysis needs.</li>
|
70 |
-
</ol>
|
71 |
-
<h2>How to use Stata 14 for Mac?</h2>
|
72 |
-
<p>Now that you have downloaded and installed Stata 14 for Mac, you might be wondering how to use it. Here are some basic tips and tricks on how to use Stata 14 for Mac:</p>
|
73 |
-
<h3>Basic commands and syntax</h3>
|
74 |
-
<p>Stata 14 for Mac allows you to interact with the software using either menus or commands. You can access the menus by clicking on the icons at the top of the window, such as "File", "Edit", "Data", "Graphics", etc. You can also access some common commands by clicking on the buttons at the bottom of the window, such as "Do-file Editor", "Data Editor", "Variables Manager", "Graph Editor", etc. You can also use commands by typing them in the command window at the bottom of the window. You can also use the do-file editor to write and execute multiple commands at once. You can also use the help window to access the documentation and examples of any command. The basic syntax of Stata commands is as follows: <code>command [varlist] [if] [in] [weight] [, options]</code>
|
75 |
-
where: - <code>command</code> is the name of the command, such as <code>regress</code>, <code>summarize</code>, <code>tabulate</code>, etc. - <code>[varlist]</code> is the list of variables that you want to use in the command, separated by spaces, such as <code>age income education</code>. You can also use wildcards, operators, or functions to specify variables, such as <code>x*</code>, <code>x1-x5</code>, <code>log(x)</code>, etc. - <code>[if]</code> is the condition that you want to apply to the command, such as <code>if gender == 1</code>, <code>if age > 30</code>, <code>if income > mean(income)</code>, etc. You can use logical operators such as <code>&</code>, <code>|</code>, or <code>!</code> to combine conditions, such as <code>if gender == 1 & age > 30</code>. - <code>[in]</code> is the range of observations that you want to use in the command, such as <code>in 1/100</code>, <code>in 101/200</code>, <code>in 1/2</code>, etc. You can also use keywords such as <code>_n</code>, <code>_N</code>, or <code>_first</code> to specify observations, such as <code>in _n-10/_n+10</code>. - <code>[weight]</code> is the type and name of the weight variable that you want to use in the command, such as <code>[fweight=pop]</code>, <code>[pweight=prob]</code>, <code>[iweight=imp]</code>, etc. You can use different types of weights depending on the nature and purpose of your analysis, such as frequency weights, probability weights, importance weights, etc. - <code>[, options]</code> are the additional options that you want to use in the command, separated by commas, such as <code>, robust</code>, <c ode>, detail</code>, <code>, graph</code>, etc. You can use different options depending on the command and the output that you want to obtain, such as robust standard errors, detailed statistics, graphical displays, etc. For example, if you want to perform a linear regression of income on age and education, you can use the following command: <code>regress income age education</code>
|
76 |
-
If you want to perform the same regression with robust standard errors and a scatter plot of the fitted values, you can use the following command: <code>regress income age education, robust graph</code>
|
77 |
-
You can also use the help window or the manuals to learn more about the syntax and options of any command. <h3>Data management and analysis</h3>
|
78 |
-
<p>Stata 14 for Mac allows you to manage and analyze your data using various commands and tools. You can import and export data from different sources and formats, such as Excel, CSV, SPSS, SAS, Stata, etc. You can also create and modify variables, labels, values, formats, etc. You can also sort, merge, append, reshape, collapse, expand, etc. your data. You can also perform various descriptive and inferential statistics on your data, such as summary statistics, frequency tables, cross-tabulations, correlation coefficients, hypothesis tests, confidence intervals, etc. You can also perform various types of analysis on your data, such as regression analysis, ANOVA, logistic regression, survival analysis, time series analysis, factor analysis, cluster analysis, structural equation modeling (SEM), item response theory (IRT), Bayesian analysis, power and sample size calculation, Markov-switching models, treatment effects models, multilevel survival models, fractional outcome regression models , and many more.</p>
|
79 |
-
<p>To manage and analyze your data using Stata 14 for Mac, you can use the following commands and tools:</p>
|
80 |
-
<ul>
|
81 |
-
<li>To import data from different sources and formats, you can use the commands such as <code>import excel</code>, <code>import delimited</code>, <code>import spss</code>, <code>import sas</code>, <code>use</code>, etc. You can also use the menu "File > Import" to access the import dialog box.</li>
|
82 |
-
<li>To export data to different sources and formats, you can use the commands such as <code>export excel</code>, <code>export delimited</code>, <code>export spss</code>, <code>export sas</code>, <code>save</code>, etc. You can also use the menu "File > Export" to access the export dialog box.</li>
|
83 |
-
<li>To create and modify variables, labels, values, formats, etc., you can use the commands such as <code>generate</code>, <code>replace</code>, <code>rename</code>, <code>recode</code>, <code>label</code>, <code>format</code>, etc. You can also use the data editor or the variables manager to access the graphical user interface (GUI) for data management.</li>
|
84 |
-
<li>To sort, merge, append, reshape, collapse, expand, etc. your data, you can use the commands such as <code>sort</code>, <code>merge</code>, <code>append</code>, <code>reshape</code>, <code>collapse</code>, <code>expand</code>, etc. You can also use the menu "Data > Data utilities" to access the data utilities dialog box.</li>
|
85 |
-
<li>To perform descriptive and inferential statistics on your data, you can use the commands such as <c ode>summarize</code>, <code>tabulate</code>, <code>tabstat</code>, <code>correlate</code>, <code>ttest</code>, <code>ci</code>, etc. You can also use the menu "Statistics > Summary statistics" or "Statistics > Tables" to access the summary statistics or tables dialog box.</li>
|
86 |
-
<li>To perform various types of analysis on your data, you can use the commands such as <code>regress</code>, <code>anova</code>, <code>logit</code>, <code>streg</code>, <code>arima</code>, <code>factor</code>, <code>cluster</code>, <code>sem</code>, <code>irt</code>, <code>bayes</code>, <code>power</code>, <code>mswitch</code>, <code>teffects</code>, <code>mestreg</code>, <code>fracreg</code>, etc. You can also use the menu "Statistics > Linear models and related" or "Statistics > Other models" to access the linear models or other models dialog box.</li>
|
87 |
-
</ul>
|
88 |
-
<h3>Graphs and visualization</h3>
|
89 |
-
<p>Stata 14 for Mac allows you to create and modify graphs and charts using various commands and tools. You can create various types of graphs, such as scatter plots, line plots, bar charts, pie charts, box plots, histogram, density plots, etc. You can also customize your graphs in various ways, such as adding titles, labels, legends, axes, colors, markers, lines, etc. You can also export your graphs to different formats, such as PDF, PNG, EPS, SVG, etc. You can also integrate your graphs with other applications, such as Microsoft Word or PowerPoint.</p>
|
90 |
-
<p>To create and modify graphs and charts using Stata 14 for Mac, you can use the following commands and tools:</p>
|
91 |
-
<ul>
|
92 |
-
<li>To create graphs using commands, you can use the commands such as <c ode>scatter</code>, <code>line</code>, <code>bar</code>, <code>pie</code>, <code>box</code>, <code>histogram</code>, <code>kdensity</code>, etc. You can also use the command <code>graph</code> to create graphs using a general syntax. You can also use the command <code>twoway</code> to create graphs using multiple plot types.</li>
|
93 |
-
<li>To create graphs using menus, you can use the menu "Graphics > Graphs" to access the graphs dialog box. You can also use the menu "Graphics > Graph editor" to access the graph editor dialog box.</li>
|
94 |
-
<li>To modify graphs using commands, you can use the commands such as <code>graph set</code>, <code>graph export</code>, <code>graph combine</code>, <code>graph rename</code>, <code>graph close</code>, etc. You can also use the command <code>graph options</code> to modify various options of your graphs.</li>
|
95 |
-
<li>To modify graphs using menus, you can use the menu "Graphics > Graph preferences" to access the graph preferences dialog box. You can also use the menu "Graphics > Graph editor" to access the graph editor dialog box.</li>
|
96 |
-
<li>To export graphs to different formats, you can use the commands such as <code>graph export</code>, <code>graph save</code>, etc. You can also use the menu "File > Save as" or "File > Export" to access the save as or export dialog box.</li>
|
97 |
-
<li>To integrate graphs with other applications, you can use the commands such as <c ode>putdocx</code>, <code>putpdf</code>, <code>putexcel</code>, etc. You can also use the menu "File > Export" to access the export dialog box.</li>
|
98 |
-
</ul>
|
99 |
-
<h2>Conclusion</h2>
|
100 |
-
<p>In this article, we have provided you with a comprehensive guide on how to download Stata 14 for Mac. We have also explained what Stata 14 is and why you need it, what are its main features and benefits, how to install and use it on your Mac computer, and some frequently asked questions about it. We hope that this article has helped you to understand whether Stata 14 is the right software for you and how to get started with it.</p>
|
101 |
-
<p>If you have any questions or comments about this article, please feel free to contact us at [email protected]. We would love to hear from you and assist you with your data analysis needs. Thank you for reading this article and happy Stata-ing!</p>
|
102 |
-
<h2>FAQs</h2>
|
103 |
-
<p>Here are some of the most frequently asked questions about Stata 14 for Mac:</p>
|
104 |
-
<ol>
|
105 |
-
<li><b>How much does Stata 14 for Mac cost?</b></li>
|
106 |
-
<p>The price of Stata 14 for Mac depends on the type of license, the number of users, and the duration of the license that you choose. You can check the current prices and discounts at <a href="">https://www.stata.com/order/</a>. You can also request a quote or a free trial at <a href="">https://www.stata.com/contact/</a>.</p>
|
107 |
-
<li><b>How can I update Stata 14 for Mac?</b></li>
|
108 |
-
<p>You can update Stata 14 for Mac by using the command <code>update</code> or by using the menu "Help > Check for updates". You can also check the latest updates and bug fixes at <a href="">https://www.stata.com/support/updates/</a>.</p>
|
109 |
-
<li><b>How can I get help with Stata 14 for Mac?</b></li>
|
110 |
-
<p>You can get help with Stata 14 for Mac by using the command <code>help</code> or by using the menu "Help > Stata help". You can also access the online documentation and examples at <a href="">https://www.stata.com/help/</a>. You can also get support from the official website of StataCorp at <a href="">https://www.stata.com/support/</a> or from the online community of Stata users at <a href="">https://www.statalist.org/</a>.</p>
|
111 |
-
<li><b>How can I learn more about Stata 14 for Mac?</b></li>
|
112 |
-
<p>You can learn more about Stata 14 for Mac by using the command <code>search</code> or by using the menu "Help > Search". You can also access the online tutorials and videos at <a href="">https://www.stata.com/learn/</a>. You can also get training courses or webinars from StataCorp or from other authorized providers at <a href="">https://www.stata.com/training/</a>.</p>
|
113 |
-
<li><b>How can I share my feedback or suggestions about Stata 14 for Mac?</b></li>
|
114 |
-
<p>You can share your feedback or suggestions about Stata 14 for Mac by using the command <code>suggest</code> or by using the menu "Help > Suggest". You can also email your feedback or suggestions to [email protected]. We appreciate your input and we will try our best to improve our software and service.</p>
|
115 |
-
</ol></p> b2dd77e56b<br />
|
116 |
-
<br />
|
117 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit Phantompdf Free Download Full Version NEW.md
DELETED
@@ -1,28 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Foxit PhantomPDF Free Download Full Version: A Powerful PDF Editor for Windows</h1>
|
3 |
-
<p>Foxit PhantomPDF is a comprehensive PDF editor that allows you to create, edit, convert, sign, and secure PDF files on your Windows computer. Whether you need to create a PDF from scratch, modify an existing PDF, or convert a PDF to another format, Foxit PhantomPDF can handle it all. In this article, we will show you how to free download Foxit PhantomPDF full version and what features it offers.</p>
|
4 |
-
<h2>How to Free Download Foxit PhantomPDF Full Version</h2>
|
5 |
-
<p>If you want to free download Foxit PhantomPDF full version, you can use this link: <a href="https://www.foxitsoftware.com/pdf-editor/">https://www.foxitsoftware.com/pdf-editor/</a>. This will take you to the official website of Foxit Software, where you can download the latest version of Foxit PhantomPDF for Windows. The file size is about 700 MB and the installation process is simple and fast.</p>
|
6 |
-
<h2>foxit phantompdf free download full version</h2><br /><p><b><b>Download</b> ✔✔✔ <a href="https://byltly.com/2uKyY9">https://byltly.com/2uKyY9</a></b></p><br /><br />
|
7 |
-
<p>Once you download the installer, double-click on it to run it. You will see a welcome screen that asks you to choose the language and accept the license agreement. Click on "Next" to proceed. Then, you will see a screen that asks you to choose the installation type. You can either choose "Standard" or "Custom". If you choose "Standard", the installer will install Foxit PhantomPDF with the default settings and features. If you choose "Custom", you can select which features and components you want to install. We recommend choosing "Custom" and selecting only the features you need.</p>
|
8 |
-
<p>Next, you will see a screen that asks you to choose the destination folder for Foxit PhantomPDF. You can either keep the default location or browse to another folder. Click on "Install" to start the installation process. The installer will show you a progress bar and a status message. Wait until the installation is complete.</p>
|
9 |
-
<h2>What Features Does Foxit PhantomPDF Offer?</h2>
|
10 |
-
<p>Foxit PhantomPDF is a powerful PDF editor that offers many features and functions for different purposes and needs. Some of the main features are:</p>
|
11 |
-
<ul>
|
12 |
-
<li>Create PDF: You can create PDF files from various sources, such as documents, images, web pages, scanners, or blank pages. You can also combine multiple files into one PDF file or split a PDF file into smaller files.</li>
|
13 |
-
<li>Edit PDF: You can edit PDF files with ease, such as adding or deleting text, images, shapes, comments, annotations, bookmarks, headers, footers, watermarks, backgrounds, etc. You can also change the font, size, color, alignment, and style of the text.</li>
|
14 |
-
<li>Convert PDF: You can convert PDF files to other formats, such as Word, Excel, PowerPoint, HTML, TXT, JPG, PNG, GIF, etc. You can also convert other formats to PDF files with high quality and accuracy.</li>
|
15 |
-
<li>Sign PDF: You can sign PDF files with digital signatures or handwritten signatures. You can also add stamps or certificates to verify the authenticity and integrity of the PDF files.</li>
|
16 |
-
<li>Secure PDF: You can secure PDF files with passwords or encryption. You can also set permissions and restrictions for opening, printing, copying, editing, or commenting on the PDF files.</li>
|
17 |
-
</ul>
|
18 |
-
<p>These are just some of the features that Foxit PhantomPDF offers. There are many more features and functions that you can explore and use with Foxit PhantomPDF.</p>
|
19 |
-
<h2>Why Choose Foxit PhantomPDF?</h2>
|
20 |
-
<p>Foxit PhantomPDF is one of the best PDF editors for Windows for many reasons. Here are some of the benefits of choosing Foxit PhantomPDF:</p>
|
21 |
-
<p></p>
|
22 |
-
<ul>
|
23 |
-
<li>It is fast and reliable. It can handle large and complex PDF files without slowing down your computer or crashing.</li>
|
24 |
-
<li>It is easy and intuitive. It has a user-friendly interface that resembles Microsoft Office. It also has a ribbon toolbar that provides quick access to common commands and tools.</li>
|
25 |
-
<li>It is compatible and flexible. It supports various formats and standards for creating and editing PDF files. It also works well with other applications and services, such as Microsoft Office 365, Google Drive, Dropbox, SharePoint, etc.</li>
|
26 |
-
<li>It is affordable and cost-effective. It offers a free trial version that you can use for 14 days without any limitations or restrictions. It also has</p> ddb901b051<br />
|
27 |
-
<br />
|
28 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Cadpower 2008 64bit.md
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<p>if you are searching for the best utility to design models, then you can use cadpower. this tool helps you in more ways than one, so that you can achieve a better layout of the model. the best feature of this utility is that it helps you to increase the work efficiency of the system. it allows you to convert, analyze, and edit the model designs as well as save them for later use. after using this tool, you will be able to design more easily. cadpower is available in the market with a completely free download, and you can use this tool in its professional way. if you are using a 32-bit version, then you can download the 64-bit version of cadpower from our website. if you are running a 64-bit version, then you can download the 32-bit version of cadpower from our website.</p>
|
3 |
-
<p>four dimension cadpower is a useful application that can help you in designing any design. it is a tool that allows the user to carry out various tasks like converting, editing, and export. in this program, you will be able to find the required models easily. the most amazing feature of this utility is that it can help you to perform various tasks very easily. the utility provides more than 30 tools that allow you to design and perform various functions efficiently. it is a highly interactive software that allows you to get to work with your cad drawings. it helps you to view the drawings in a more detailed and quicker manner.</p>
|
4 |
-
<h2>cadpower 2008 64bit</h2><br /><p><b><b>Download File</b> ☆ <a href="https://imgfil.com/2uxXBA">https://imgfil.com/2uxXBA</a></b></p><br /><br />
|
5 |
-
<p>four dimension cadpower is a standalone utility that helps you to carry out various cad tasks. this tool is designed to provide you with different features that are required for the designers and users. you can use the latest version of this utility to get more features. this tool is compatible with windows 2000/xp/vista/7/8, mac osx 10.6 and higher and it is available in the market with a free download. this tool is a reliable utility that is designed to help you design projects effectively. four dimension cadpower is easily configurable and user-friendly which helps you to create drawings more easily.</p> 899543212b<br />
|
6 |
-
<br />
|
7 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Euro Truck Simulator 1 Pc Crack [UPD].md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Euro Truck Simulator 1 Pc Crack</h2><br /><p><b><b>Download Zip</b> ->>> <a href="https://imgfil.com/2uy1yw">https://imgfil.com/2uy1yw</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
Navigate to: Documents\Euro Truck Simulator 2\profile. There you can find config file. Open it with notepad and find this: ... uset g_lang "fi_fi". I have fi because ... 1fdad05405<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Cricket League MOD APK and Become the Ultimate Cricket Champion (Unlimited Gems and Coins).md
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cricket League Mod Apk: How to Download and Enjoy Unlimited Gems and Coins</h1>
|
3 |
-
<p>Do you love cricket and want to play it on your mobile device? Do you want to have unlimited gems and coins to unlock all the players and teams you want? Do you want to experience realistic cricket matches and leagues with your friends? If you answered yes to any of these questions, then you should try Cricket League Mod Apk.</p>
|
4 |
-
<h2>What is Cricket League Mod Apk?</h2>
|
5 |
-
<p>Cricket League Mod Apk is a modified version of the original Cricket League game, which is a popular cricket simulation game for Android devices. In this game, you can create your own team, choose your players, customize your jerseys, and compete in various cricket tournaments. You can also play online with other players from around the world, or offline with your friends using local multiplayer mode.</p>
|
6 |
-
<h2>cricket league mod apk unlimited gems and coins download</h2><br /><p><b><b>Download File</b> ->->->-> <a href="https://urlin.us/2uT2CE">https://urlin.us/2uT2CE</a></b></p><br /><br />
|
7 |
-
<h3>Features of Cricket League Mod Apk</h3>
|
8 |
-
<p>Cricket League Mod Apk has many features that make it more fun and exciting than the original game. Some of these features are:</p>
|
9 |
-
<h4>Unlimited Gems and Coins</h4>
|
10 |
-
<p>Gems and coins are the main currencies in the game, which you can use to buy new players, upgrade your skills, unlock new stadiums, and more. However, in the original game, you have to earn them by playing matches, completing missions, or watching ads. This can be time-consuming and frustrating, especially if you want to get the best players and teams quickly. With Cricket League Mod Apk, you don't have to worry about that. You will get unlimited gems and coins as soon as you start the game, and you can spend them as much as you want without running out.</p>
|
11 |
-
<h4>Unlocked All Players and Teams</h4>
|
12 |
-
<p>In the original game, you have to unlock new players and teams by spending gems and coins, or by winning certain tournaments. This can be challenging and tedious, especially if you want to play with your favorite players and teams. With Cricket League Mod Apk, you don't have to do that. You will get access to all the players and teams in the game, including the legendary ones. You can choose any player or team you want, and customize them according to your preferences.</p>
|
13 |
-
<h4>Realistic Cricket Experience</h4>
|
14 |
-
<p>Cricket League Mod Apk offers a realistic cricket experience that will make you feel like you are playing on a real pitch. The game has high-quality graphics, sound effects, animations, and physics that will immerse you in the game. The game also has various modes, such as T20, ODI, Test, World Cup, IPL, PSL, BBL, CPL, and more. You can play in different weather conditions, day or night matches, different pitch types, and different difficulty levels. You can also use different strategies, such as batting order, bowling order, fielding positions, power play, etc.</p>
|
15 |
-
<h2>How to Download and Install Cricket League Mod Apk?</h2>
|
16 |
-
<p>If you are interested in playing Cricket League Mod Apk, you can follow these simple steps to download and install it on your Android device:</p>
|
17 |
-
<p>cricket league mod apk free download with unlimited gems and coins<br />
|
18 |
-
download cricket league mod apk latest version with unlimited gems and coins<br />
|
19 |
-
how to get unlimited gems and coins in cricket league mod apk<br />
|
20 |
-
cricket league hack mod apk download with unlimited gems and coins<br />
|
21 |
-
cricket league mod apk unlimited everything (gems, coins, money)<br />
|
22 |
-
cricket league 2023 mod apk download with unlimited gems and coins<br />
|
23 |
-
cricket league mod apk for android with unlimited gems and coins<br />
|
24 |
-
cricket league mod apk offline with unlimited gems and coins<br />
|
25 |
-
cricket league premium mod apk download with unlimited gems and coins<br />
|
26 |
-
cricket league pro mod apk with unlimited gems and coins<br />
|
27 |
-
cricket league mod apk unlimited gems and coins no root<br />
|
28 |
-
cricket league mod apk unlimited gems and coins no verification<br />
|
29 |
-
cricket league mod apk unlimited gems and coins online<br />
|
30 |
-
cricket league mod apk unlimited gems and coins generator<br />
|
31 |
-
cricket league mod apk unlimited gems and coins hack<br />
|
32 |
-
cricket league 3d mod apk download with unlimited gems and coins<br />
|
33 |
-
cricket league fantasy mod apk with unlimited gems and coins<br />
|
34 |
-
cricket league manager mod apk download with unlimited gems and coins<br />
|
35 |
-
cricket league simulator mod apk with unlimited gems and coins<br />
|
36 |
-
cricket league world cup mod apk download with unlimited gems and coins<br />
|
37 |
-
best cricket league mod apk with unlimited gems and coins<br />
|
38 |
-
real cricket league mod apk download with unlimited gems and coins<br />
|
39 |
-
super cricket league mod apk with unlimited gems and coins<br />
|
40 |
-
ultimate cricket league mod apk download with unlimited gems and coins<br />
|
41 |
-
world cricket league mod apk with unlimited gems and coins</p>
|
42 |
-
<h3>Step 1: Enable Unknown Sources</h3>
|
43 |
-
<p>Before you can install any mod apk file on your device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device's settings, then security, then unknown sources, and turn it on.</p>
|
44 |
-
<h3>Step 2: Download the Mod Apk File</h3>
|
45 |
-
<p>Next, you need to download the mod apk file of Cricket League from a reliable source. You can search for it on Google, or use the link below to download it directly. The file size is about 100 MB, so make sure you have enough space on your device.</p>
|
46 |
-
<p><a href="">Download Cricket League Mod Apk</a></p>
|
47 |
-
<h3>Step 3: Install the Mod Apk File</h3>
|
48 |
-
<p>After you have downloaded the mod apk file, you need to install it on your device. To do this, locate the file in your file manager, and tap on it. You will see a pop-up window asking you to confirm the installation. Tap on install, and wait for a few seconds until the installation is complete.</p>
|
49 |
-
<h3>Step 4: Launch the Game and Enjoy</h3>
|
50 |
-
<p>Finally, you can launch the game and enjoy unlimited gems and coins, and all the features of Cricket League Mod Apk. You will see a welcome screen with some instructions and tips. You can skip them or read them as you wish. Then, you can create your profile, choose your team, and start playing the game.</p>
|
51 |
-
<h2>Why Should You Play Cricket League Mod Apk?</h2>
|
52 |
-
<p>Cricket League Mod Apk is a great game for cricket lovers who want to have more fun and excitement in their mobile gaming. Here are some of the pros and cons of playing this game:</p>
|
53 |
-
<h3>Pros of Cricket League Mod Apk</h3>
|
54 |
-
<h4>Free and Easy to Play</h4>
|
55 |
-
<p>One of the best things about Cricket League Mod Apk is that it is free and easy to play. You don't have to spend any money to download or play this game. You also don't have to worry about any complicated controls or rules. The game has a simple and intuitive interface that will guide you through the game. You can also adjust the settings according to your preferences and comfort level.</p>
|
56 |
-
<h4>Fun and Engaging Gameplay</h4>
|
57 |
-
<p>Another great thing about Cricket League Mod Apk is that it has a fun and engaging gameplay that will keep you hooked for hours. The game has various modes, tournaments, challenges, and missions that will test your skills and strategy. You can also play with other players online or offline, and chat with them using the in-game chat feature. The game also has a leaderboard and achievements system that will motivate you to improve your performance and rank.</p>
|
58 |
-
<h4>Customizable and Diverse Options</h4>
|
59 |
-
<p>A third great thing about Cricket League Mod Apk is that it has customizable and diverse options that will make your game more enjoyable and unique. You can choose from hundreds of players and teams, each with their own stats and abilities. You can also customize your jerseys, logos, bats, balls, etc. You can also play in different stadiums, weather conditions, pitch types, etc.</p>
|
60 |
-
<h3>Cons of Cricket League Mod Apk</h3>
|
61 |
-
<h4>Requires Internet Connection</h4>
|
62 |
-
<p>One of the drawbacks of Cricket League Mod Apk is that it requires an internet connection to play online mode or update the game. This can be a problem if you have a slow or unstable internet connection, or if you don't have access to Wi-Fi or mobile data. You may experience lagging, crashing, or loading issues while playing the game.</p>
|
63 |
-
<h4>May Contain Ads and Bugs</h4>
|
64 |
-
<p>Another drawback of Cricket League Mod Apk is that it may contain ads and bugs that can affect your gaming experience. Since this is a mod apk file, it may not be compatible with some devices or versions of Android. It may also have some glitches or errors that can cause the game to freeze or crash. You may also see some ads popping up while playing the game, which can be annoying or distracting.</p>
|
65 |
-
<h2>Conclusion</h2>
|
66 |
-
<p>Cricket League Mod Apk is a fantastic cricket simulation game that will give you unlimited gems and coins, and access to all the players and teams in the game. You can also enjoy realistic cricket matches and leagues with your friends online or offline. The game has high-quality graphics, sound effects, animations, and physics that will make you feel like you are playing on a real pitch. The game also has various modes, such as T20, ODI, Test, World Cup, IPL, PSL, BBL, CPL, and more.</p>
|
67 |
-
<p>If you are a cricket fan who wants to have more fun and excitement in your mobile gaming, then you should definitely try Cricket League Mod Ap k. However, you should also be aware of the drawbacks of this game, such as requiring an internet connection, and containing ads and bugs. You should also be careful about downloading and installing mod apk files from unknown sources, as they may contain viruses or malware that can harm your device or data.</p>
|
68 |
-
<p>We hope this article has helped you learn more about Cricket League Mod Apk, and how to download and enjoy it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
|
69 |
-
<h2>FAQs</h2>
|
70 |
-
<p>Here are some of the frequently asked questions about Cricket League Mod Apk:</p>
|
71 |
-
<ol>
|
72 |
-
<li>Is Cricket League Mod Apk safe to download and install?</li>
|
73 |
-
<p>Cricket League Mod Apk is generally safe to download and install, as long as you get it from a reliable source. However, you should always scan the file with an antivirus or malware detector before installing it, and backup your data before playing the game. You should also avoid giving any personal or sensitive information to the game or its developers.</p>
|
74 |
-
<li>Is Cricket League Mod Apk legal to play?</li>
|
75 |
-
<p>Cricket League Mod Apk is not legal to play, as it violates the terms and conditions of the original game and its developers. By playing this game, you are infringing on the intellectual property rights of the original game and its developers. You may also face legal consequences if you are caught playing this game by the authorities or the original game developers.</p>
|
76 |
-
<li>How can I update Cricket League Mod Apk?</li>
|
77 |
-
<p>Cricket League Mod Apk does not have an official update system, as it is not supported by the original game developers. You may have to download and install a new mod apk file every time there is a new version of the original game. However, this may not work if the new version of the original game has some changes or features that are incompatible with the mod apk file.</p>
|
78 |
-
<li>Can I play Cricket League Mod Apk with my friends?</li>
|
79 |
-
<p>Yes, you can play Cricket League Mod Apk with your friends online or offline. You can join or create online matches with other players from around the world, or use local multiplayer mode to play with your friends using Bluetooth or Wi-Fi. However, you may not be able to play with your friends who are using the original game, as they may have different versions or features than you.</p>
|
80 |
-
<li>Can I play Cricket League Mod Apk on PC or iOS devices?</li>
|
81 |
-
<p>No, you cannot play Cricket League Mod Apk on PC or iOS devices, as it is only designed for Android devices. You may be able to use some emulators or converters to run this game on PC or iOS devices, but they may not work properly or cause some issues. We do not recommend using any emulators or converters to play this game on PC or iOS devices.</p>
|
82 |
-
</ol></p> 197e85843d<br />
|
83 |
-
<br />
|
84 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Burn Belly Fat and Sculpt Your 6 Pack Abs with This Amazing APK.md
DELETED
@@ -1,89 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>6 Pack Abs APK - What Is It and How Does It Work?</h1>
|
3 |
-
<p>If you want to get six pack abs without going to the gym or spending money on expensive equipment, you might want to try 6 pack abs apk. This is a free app that provides you with a 30-day workout plan that targets your upper and lower abdominal muscles. The app also features animations and video guides that show you how to perform each exercise correctly and effectively. You can also customize your workout reminders and track your progress automatically.</p>
|
4 |
-
<h2>6 pack abs apk</h2><br /><p><b><b>Download Zip</b> <a href="https://jinyurl.com/2uNTnl">https://jinyurl.com/2uNTnl</a></b></p><br /><br />
|
5 |
-
<p>But why should you care about getting six pack abs in the first place? Well, there are many benefits of having six pack abs, both physical and psychological. Here are some of them:</p>
|
6 |
-
<ul>
|
7 |
-
<li><strong>Improved posture and core strength.</strong> Having six pack abs means having strong abdominal muscles that support your spine and pelvis. This can help you improve your posture, prevent lower back pain, and enhance your core strength.</li>
|
8 |
-
<li><strong>Better sporting performance and agility.</strong> Having six pack abs also means having more power transfer between your upper and lower body. This can help you improve your sporting performance, agility, balance, coordination, and speed.</li>
|
9 |
-
<li><strong>Increased basal metabolic rate and fat burning.</strong> Having six pack abs also means having more muscle mass in your body. This can help you increase your basal metabolic rate, which is the amount of calories you burn at rest. This can also help you burn more fat and reduce your body fat percentage, which is necessary to reveal your six pack abs.</li>
|
10 |
-
</ul>
|
11 |
-
<p>As you can see, 6 pack abs apk is a great app that can help you get six pack abs and enjoy many benefits. But before you download it and start working out, there are some myths about six pack abs that you need to be aware of.</p>
|
12 |
-
<h2>The Myths About 6 Pack Abs APK</h2>
|
13 |
-
<p>There are many myths and misconceptions about six pack abs that can prevent you from achieving your goal or even harm your health. Here are some of the most common ones and why they are not true:</p>
|
14 |
-
<p>6 pack abs workout app download<br />
|
15 |
-
best 6 pack abs exercises apk<br />
|
16 |
-
how to get 6 pack abs in 30 days apk<br />
|
17 |
-
6 pack abs home workout apk<br />
|
18 |
-
6 pack abs trainer pro apk<br />
|
19 |
-
6 pack abs photo editor apk<br />
|
20 |
-
6 pack abs challenge apk<br />
|
21 |
-
6 pack abs diet plan apk<br />
|
22 |
-
6 pack abs video tutorial apk<br />
|
23 |
-
6 pack abs fitness apk<br />
|
24 |
-
6 pack abs yoga apk<br />
|
25 |
-
6 pack abs simulator apk<br />
|
26 |
-
6 pack abs bodybuilding apk<br />
|
27 |
-
6 pack abs tips and tricks apk<br />
|
28 |
-
6 pack abs motivation apk<br />
|
29 |
-
6 pack abs transformation apk<br />
|
30 |
-
6 pack abs calculator apk<br />
|
31 |
-
6 pack abs anatomy apk<br />
|
32 |
-
6 pack abs wallpaper apk<br />
|
33 |
-
6 pack abs quiz apk<br />
|
34 |
-
6 pack abs game apk<br />
|
35 |
-
6 pack abs music apk<br />
|
36 |
-
6 pack abs jokes apk<br />
|
37 |
-
6 pack abs stickers apk<br />
|
38 |
-
6 pack abs emoji apk<br />
|
39 |
-
6 pack abs memes apk<br />
|
40 |
-
6 pack abs quotes apk<br />
|
41 |
-
6 pack abs facts apk<br />
|
42 |
-
6 pack abs myths apk<br />
|
43 |
-
6 pack abs secrets apk<br />
|
44 |
-
6 pack abs stories apk<br />
|
45 |
-
6 pack abs testimonials apk<br />
|
46 |
-
6 pack abs reviews apk<br />
|
47 |
-
6 pack abs ratings apk<br />
|
48 |
-
6 pack abs comparison apk<br />
|
49 |
-
6 pack abs alternatives apk<br />
|
50 |
-
6 pack abs benefits apk<br />
|
51 |
-
6 pack abs results apk<br />
|
52 |
-
6 pack abs progress apk<br />
|
53 |
-
6 pack abs goals apk</p>
|
54 |
-
<h3>You Need a Fat Burner or a Low Carb Diet</h3>
|
55 |
-
<p>Some people think that they need to take a fat burner supplement or follow a low carb diet to get six pack abs. This is not true. Fat burners are not effective or safe, as they can cause side effects such as insomnia, anxiety, high blood pressure, and liver damage. Low carb diets are also not necessary or sustainable, as they can cause fatigue, mood swings, muscle loss, and nutrient deficiencies. The best way to get six pack abs is to eat a healthy, balanced diet that provides enough calories and macronutrients (protein, carbs, and fats) for your body and activity level.</p>
|
56 |
-
<h3>You Can Crunch Your Way to a Six Pack</h3>
|
57 |
-
<p>Some people think that they can crunch their way to a six pack by doing hundreds of crunches every day. This is not true. Crunches are not enough to reveal your six pack abs, as they only target one part of your abdominal muscles (the rectus abdominis). To get six pack abs, you need to work out all the muscles in your core, including the obliques, the transverse abdominis, and the lower back. You also need to reduce your body fat percentage by doing cardio and strength training exercises that burn calories and build muscle mass.</p>
|
58 |
-
<h3>You Must Train Abs Every Day or Use Special Equipment</h3>
|
59 |
-
<p>Some people think that they must train their abs every day or use special equipment such as ab rollers, ab machines, or ab belts to get six pack abs. This is not true. Training your abs every day is not necessary or beneficial, as it can lead to overtraining, injury, and muscle imbalance. Your abs need rest and recovery just like any other muscle group. You should train your abs two to three times a week with adequate rest days in between. Using special equipment is also not required or effective, as they can limit your range of motion, isolate your muscles, and create false expectations. The best way to train your abs is to use bodyweight exercises that challenge your core stability, strength, and endurance.</p>
|
60 |
-
<h2>The Tips for Using 6 Pack Abs APK Effectively</h2>
|
61 |
-
<p>Now that you know what 6 pack abs apk is and how it works, and what are the myths about six pack abs that you should avoid, here are some tips for using 6 pack abs apk effectively:</p>
|
62 |
-
<h3>Follow the Workout Plan Consistently</h3>
|
63 |
-
<p>The first tip is to follow the workout plan provided by 6 pack abs apk consistently. The app offers a 30-day workout plan that consists of three levels of difficulty (beginner, intermediate, and advanced) and various exercises for the upper and lower abs. Each workout takes about 10 minutes and can be done at home or anywhere else. The app also provides animations and video guides that show you how to perform each exercise correctly and effectively. To get the best results from 6 pack abs apk, you should follow the workout plan without skipping any days or sessions. Consistency is key to getting results.</p>
|
64 |
-
<h3>Eat a Healthy, Balanced Diet</h3>
|
65 |
-
<p>The second tip is to eat a healthy, balanced diet that supports your workout plan and your goal of getting six pack abs. As mentioned earlier, nutrition is important for muscle growth and fat loss. You should eat enough calories and macronutrients (protein, carbs, and fats) for your body and activity level. You should also eat foods that are rich in vitamins, minerals, antioxidants, and fiber. Some examples of healthy foods are lean meats, eggs, fish, dairy products, nuts, seeds, beans, fruits, vegetables, whole grains, and healthy oils. You should also avoid foods that are high in sugar, salt, trans fats, and processed ingredients. Some examples of unhealthy foods are candy, soda, chips, cookies, cakes, fast food, and fried food. Eating a healthy, balanced diet can help you get six pack abs by providing your body with the nutrients it needs to function properly and recover from your workouts.</p>
|
66 |
-
<h3>Drink Plenty of Water and Get Enough Sleep</h3>
|
67 |
-
<p>The third tip is to drink plenty of water and get enough sleep to support your workout plan and your goal of getting six pack abs. Water is essential for your body, as it helps you stay hydrated, regulate your body temperature, flush out toxins, transport nutrients, and lubricate your joints. You should drink at least eight glasses of water a day, or more if you exercise or sweat a lot. Water can also help you get six pack abs by suppressing your appetite, boosting your metabolism, and preventing water retention. Sleep is also vital for your body, as it helps you restore your energy, repair your muscles, consolidate your memory, and regulate your hormones. You should get at least seven to nine hours of sleep a night, or more if you need it. Sleep can also help you get six pack abs by reducing your stress levels, improving your mood, enhancing your performance, and preventing cravings.</p>
|
68 |
-
<h1>Conclusion</h1>
|
69 |
-
<p>6 pack abs apk is a free app that can help you get six pack abs in 30 days by providing you with a workout plan that targets your upper and lower abdominal muscles. The app also features animations and video guides that show you how to perform each exercise correctly and effectively. You can also customize your workout reminders and track your progress automatically.</p>
|
70 |
-
<p>Getting six pack abs can provide you with many benefits, such as improved posture and core strength, better sporting performance and agility, increased basal metabolic rate and fat burning, and more confidence and self-esteem. However, to get six pack abs, you need to avoid some myths and misconceptions that can hinder your progress or harm your health. These include the myths that you need a fat burner or a low carb diet, that you can crunch your way to a six pack, and that you must train abs every day or use special equipment.</p>
|
71 |
-
<p>To use 6 pack abs apk effectively, you need to follow some tips that can help you achieve your goal faster and easier. These include the tips of following the workout plan consistently, eating a healthy, balanced diet, drinking plenty of water and getting enough sleep.</p>
|
72 |
-
<p>If you follow these tips and use 6 pack abs apk regularly, you will be able to get six pack abs in no time. So what are you waiting for? Download 6 pack abs apk today and start working on your dream body!</p>
|
73 |
-
<h2>FAQs</h2>
|
74 |
-
<p>Here are some frequently asked questions about 6 pack abs apk:</p>
|
75 |
-
<ol>
|
76 |
-
<li><strong>How do I download 6 pack abs apk?</strong></li>
|
77 |
-
<p>You can download 6 pack abs apk from the Google Play Store or the App Store for free. Just search for "6 pack abs apk" and install it on your device.</p>
|
78 |
-
<li><strong>How do I use 6 pack abs apk?</strong></li>
|
79 |
-
<p>You can use 6 pack abs apk by following the instructions on the app. First, you need to choose your level of difficulty (beginner, intermediate, or advanced). Then, you need to start the workout plan that consists of various exercises for the upper and lower abs. You can also set reminders for your workouts and track your progress automatically.</p>
|
80 |
-
<li><strong>How long does it take to see results with 6 pack abs apk?</strong></li>
|
81 |
-
<p>The time it takes to see results with 6 pack abs apk depends on several factors, such as your starting point, your diet, your exercise routine, your genetics, and your commitment. However, if you follow the workout plan consistently, eat a healthy, balanced diet, drink plenty of water and get enough sleep, you should be able to see some results in as little as four weeks. Of course, the more you stick to the plan and the more you challenge yourself, the faster and better your results will be.</p>
|
82 |
-
<li><strong>Is 6 pack abs apk safe and effective?</strong></li>
|
83 |
-
<p>Yes, 6 pack abs apk is safe and effective, as it is based on scientific research and proven methods. The app provides you with a workout plan that targets your abdominal muscles with various exercises that are suitable for different levels of difficulty. The app also provides you with animations and video guides that show you how to perform each exercise correctly and effectively. The app also allows you to customize your workout reminders and track your progress automatically. The app does not require any special equipment or supplements, and it does not promote any unhealthy or unrealistic practices.</p>
|
84 |
-
<li><strong>Can I use 6 pack abs apk with other fitness apps or programs?</strong></li>
|
85 |
-
<p>Yes, you can use 6 pack abs apk with other fitness apps or programs, as long as they are compatible and complementary. For example, you can use 6 pack abs apk with a running app or a yoga app to add some cardio and flexibility training to your routine. You can also use 6 pack abs apk with a weight lifting app or a bodyweight app to add some strength and resistance training to your routine. However, you should not use 6 pack abs apk with another ab workout app or program, as this can lead to overtraining, injury, and muscle imbalance. You should also not use 6 pack abs apk with an app or program that contradicts or conflicts with the principles and guidelines of 6 pack abs apk.</p>
|
86 |
-
<li><strong>What if I have questions or feedback about 6 pack abs apk?</strong></li>
|
87 |
-
<p>If you have any questions or feedback about 6 pack abs apk, you can contact the developers of the app through their email address or their social media accounts. You can also leave a review or a rating on the Google Play Store or the App Store to share your experience and opinion with other users. The developers of 6 pack abs apk are always happy to hear from their users and to improve their app based on their suggestions and feedback.</p> 197e85843d<br />
|
88 |
-
<br />
|
89 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Topaz AI and Learn How to Use It to Improve Your Image Quality in Minutes.md
DELETED
@@ -1,129 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Topaz AI: How to Enhance Your Photos and Videos with Artificial Intelligence</h1>
|
3 |
-
<p>Do you want to improve the quality of your photos and videos with the power of artificial intelligence? If so, you should download Topaz AI, a suite of software products that use cutting-edge image enhancement technology to magically transform your images and videos. In this article, you will learn what Topaz AI is, how it works, how to download and install it on your computer, how to use it from your image editor, and how to apply it to different scenarios. By the end of this article, you will be able to enhance your photos and videos like never before with Topaz AI.</p>
|
4 |
-
<h2>Topaz Photo AI: Maximize Your Image Quality on Autopilot</h2>
|
5 |
-
<p>Topaz Photo AI is a collection of four products that use artificial intelligence to sharpen, remove noise, and increase the resolution of your photos. These products are:</p>
|
6 |
-
<h2>download topaz ai</h2><br /><p><b><b>Download Zip</b> ✦ <a href="https://jinyurl.com/2uNNj5">https://jinyurl.com/2uNNj5</a></b></p><br /><br />
|
7 |
-
<ul>
|
8 |
-
<li><strong>Gigapixel AI</strong>: This product allows you to upscale your images by up to 6x while increasing actual resolution and real detail. You can use it to enlarge your photos for printing, cropping, or restoring old photos.</li>
|
9 |
-
<li><strong>DeNoise AI</strong>: This product allows you to remove noise from your images while preserving detail and color. You can use it to shoot anywhere in any light without worrying about noise.</li>
|
10 |
-
<li><strong>Sharpen AI</strong>: This product allows you to sharpen your images while keeping them natural. You can use it to reverse motion and focus blur, or simply enhance the sharpness of your photos.</li>
|
11 |
-
<li><strong>Video Enhancer AI</strong>: This product allows you to upscale, denoise, sharpen, and deinterlace your videos with stunning results. You can use it to convert SD to HD or HD to 4k, or simply improve the quality of your videos.</li>
|
12 |
-
</ul>
|
13 |
-
<p>Topaz Photo AI uses deep learning algorithms that have been trained on millions of data points to understand what image quality means. Unlike regular image processing filters that often remove details and boost noise/artifacts, Topaz Photo AI enhances image quality by analyzing and enhancing the most important aspects of each image. You can use Topaz Photo AI as a standalone application or as a plug-in for your favorite image editor.</p>
|
14 |
-
<h2>Topaz Video AI: Create Naturally Better Video Quality with AI</h2>
|
15 |
-
<p>Topaz Video AI is a product that uses artificial intelligence to upscale, denoise, sharpen, and deinterlace your videos. It is based on the same technology as Topaz Photo AI, but optimized for video processing. You can use Topaz Video AI to:</p>
|
16 |
-
<ul>
|
17 |
-
<li><strong>Upscale your videos</strong>: You can increase the resolution of your videos by up to 4x while preserving or enhancing the original quality. You can use it to convert SD to HD or HD to 4k, or simply make your videos look better on larger screens.</li>
|
18 |
-
<li><strong>Denoise your videos</strong>: You can remove visible image noise from your videos while retaining details and colors. You can use it to improve the quality of videos shot in low-light conditions, or reduce the compression artifacts from online videos.</li>
|
19 |
-
<li><strong>Sharpen your videos</strong>: You can increase the perceived sharpness of your videos by applying a natural-looking sharpening effect. You can use it to make your videos look more crisp and clear, or correct the softness caused by upscaling or noise reduction.</li>
|
20 |
-
<li><strong>Deinterlace your videos</strong>: You can convert interlaced videos to progressive ones while preserving image definition and reducing artifacts. You can use it to improve the quality of videos from older sources, such as DVDs or TV broadcasts.</li>
|
21 |
-
</ul>
|
22 |
-
<p>Topaz Video AI uses deep learning algorithms that have been trained on thousands of hours of video data to understand what video quality means. Unlike regular video processing filters that often introduce artifacts and distortions, Topaz Video AI enhances video quality by analyzing and improving the most important aspects of each frame. You can use Topaz Video AI as a standalone application or as an external editor for your favorite video editor.</p>
|
23 |
-
<h2>How to Download and Install Topaz AI on Your Computer</h2>
|
24 |
-
<p>If you want to download and install Topaz AI on your computer, you need to follow these steps:</p>
|
25 |
-
<ol>
|
26 |
-
<li><strong>Visit the official website of Topaz Labs</strong>: Go to <a href="https://topazlabs.com/">https://topazlabs.com/</a> and click on the "Download" button at the top right corner of the page.</li>
|
27 |
-
<li><strong>Select the products you want to download</strong>: You will see a list of all the products available from Topaz Labs, including Topaz Photo AI and Topaz Video AI. You can select one or more products by clicking on the checkboxes next to them. You can also download a free trial version of each product by clicking on the "Try Free" button below them.</li>
|
28 |
-
<li><strong>Enter your email address and password</strong>: If you already have an account with Topaz Labs, you can enter your email address and password to log in. If you don't have an account, you can create one by clicking on the "Create Account" button and filling in the required information.</li>
|
29 |
-
<li><strong>Download the installer file</strong>: After logging in or creating an account, you will see a download link for each product you selected. Click on the link to download the installer file for your operating system (Windows or Mac).</li>
|
30 |
-
<li><strong>Run the installer file</strong>: After downloading the installer file, locate it on your computer and double-click on it to run it. Follow the instructions on the screen to install the product on your computer.</li>
|
31 |
-
<li><strong>Activate the product</strong>: After installing the product, launch it from your desktop or start menu. You will see a window asking you to activate the product with your license key. If you have purchased the product, you can enter your license key in the field provided and click on "Activate". If you are using a free trial version, you can click on "Start Trial" to activate it for 30 days.</li>
|
32 |
-
</ol>
|
33 |
-
<p>Congratulations! You have successfully downloaded and installed Topaz AI on your computer. Now you can start using it to enhance your photos and videos with artificial intelligence.</p>
|
34 |
-
<h2>How to Access Topaz AI from Your Image Editor</h2>
|
35 |
-
<p>If you want to access Topaz AI from your image editor, such as Photoshop, Lightroom, or other compatible editors, you need to follow these steps:</p>
|
36 |
-
<p>download topaz labs photo ai<br />
|
37 |
-
download topaz video ai for windows<br />
|
38 |
-
download topaz gigapixel ai free trial<br />
|
39 |
-
download topaz sharpen ai mac<br />
|
40 |
-
download topaz denoise ai crack<br />
|
41 |
-
download topaz photo ai full version<br />
|
42 |
-
download topaz video ai latest version<br />
|
43 |
-
download topaz gigapixel ai portable<br />
|
44 |
-
download topaz sharpen ai review<br />
|
45 |
-
download topaz denoise ai coupon<br />
|
46 |
-
download topaz photo ai tutorial<br />
|
47 |
-
download topaz video ai system requirements<br />
|
48 |
-
download topaz gigapixel ai update<br />
|
49 |
-
download topaz sharpen ai before and after<br />
|
50 |
-
download topaz denoise ai vs lightroom<br />
|
51 |
-
download topaz photo ai bundle<br />
|
52 |
-
download topaz video ai reddit<br />
|
53 |
-
download topaz gigapixel ai license key<br />
|
54 |
-
download topaz sharpen ai plugin<br />
|
55 |
-
download topaz denoise ai presets<br />
|
56 |
-
download topaz photo ai online<br />
|
57 |
-
download topaz video ai alternative<br />
|
58 |
-
download topaz gigapixel ai comparison<br />
|
59 |
-
download topaz sharpen ai serial number<br />
|
60 |
-
download topaz denoise ai settings<br />
|
61 |
-
download topaz photo ai software<br />
|
62 |
-
download topaz video ai beta<br />
|
63 |
-
download topaz gigapixel ai tutorial<br />
|
64 |
-
download topaz sharpen ai standalone<br />
|
65 |
-
download topaz denoise ai trial<br />
|
66 |
-
download topaz photo ai app<br />
|
67 |
-
download topaz video ai blog<br />
|
68 |
-
download topaz gigapixel ai coupon code<br />
|
69 |
-
download topaz sharpen ai discount code<br />
|
70 |
-
download topaz denoise ai manual<br />
|
71 |
-
download topaz photo ai for android<br />
|
72 |
-
download topaz video ai for macos<br />
|
73 |
-
download topaz gigapixel ai for photoshop<br />
|
74 |
-
download topaz sharpen ai for lightroom<br />
|
75 |
-
download topaz denoise ai for premiere pro<br />
|
76 |
-
download topaz photo ai guide<br />
|
77 |
-
download topaz video ai help center<br />
|
78 |
-
download topaz gigapixel ai installation guide<br />
|
79 |
-
download topaz sharpen ai keygen<br />
|
80 |
-
download topaz denoise ai license code<br />
|
81 |
-
download topaz photo ai price<br />
|
82 |
-
download topaz video ai release notes<br />
|
83 |
-
download topaz gigapixel ai support forum</p>
|
84 |
-
<ol>
|
85 |
-
<li><strong>Install Topaz AI as a plug-in or external editor</strong>: When you install Topaz AI on your computer, it will automatically detect and install itself as a plug-in or external editor for some of the most popular image editors, such as Photoshop and Lightroom. If you want to install it for other editors, you can manually install it by following the instructions on the <a href="">Topaz Labs support page</a>.</li>
|
86 |
-
<li><strong>Open your image in your image editor</strong>: Launch your image editor and open the image you want to enhance with Topaz AI.</li>
|
87 |
-
<li><strong>Access Topaz AI from your image editor</strong>: Depending on your image editor, you can access Topaz AI in different ways. For example, in Photoshop, you can go to Filter > Topaz Labs > and select the product you want to use. In Lightroom, you can right-click on the image and go to Edit In > and select the product you want to use. For other editors, you can refer to the <a href="">Topaz Labs support page</a> for more details.</li>
|
88 |
-
<li><strong>Edit your image with Topaz AI</strong>: After accessing Topaz AI from your image editor, you will see a new window with the interface of the product you selected. You can use the tools and settings on the left panel to adjust the parameters of the enhancement, and preview the results on the main panel. You can also compare the before and after images by using the buttons on the bottom panel.</li>
|
89 |
-
<li><strong>Save and return to your image editor</strong>: After editing your image with Topaz AI, you can save and return to your image editor by clicking on the "Apply" button on the top right corner of the window. Your image will be updated with the changes made by Topaz AI.</li>
|
90 |
-
</ol>
|
91 |
-
<p>That's it! You have successfully accessed and used Topaz AI from your image editor. Now you can enjoy the benefits of artificial intelligence for your photos.</p>
|
92 |
-
<h2>How to Use Topaz AI to Enhance Your Photos and Videos</h2>
|
93 |
-
<p>If you want to use Topaz AI to enhance your photos and videos, you need to follow these steps:</p>
|
94 |
-
<ol>
|
95 |
-
<li><strong>Select the product that suits your needs</strong>: Depending on what you want to achieve with your photos or videos, you can choose from different products within Topaz Photo AI or Topaz Video AI. For example, if you want to upscale your images, you can use Gigapixel AI. If you want to remove noise from your videos, you can use Video Enhancer AI.</li>
|
96 |
-
<li><strong>Open your photo or video in Topaz AI</strong>: You can open your photo or video in Topaz AI either as a standalone application or as a plug-in or external editor for your image or video editor. See the previous section for more details on how to access Topaz AI from your editor.</li>
|
97 |
-
<li><strong>Select the mode that suits your needs</strong>: Depending on the product you are using, you can select from different modes that offer different levels of enhancement or customization. For example, in Gigapixel AI, you can choose from Auto, Manual, or Custom modes. In Video Enhancer AI, you can choose from Standard Quality, High Quality, or Custom Quality modes.</li>
|
98 |
-
<li><strong>Adjust the settings that suit your needs</strong>: Depending on the mode and product you are using, you can adjust various settings that affect the outcome of the enhancement. For example, in Gigapixel AI, you can adjust the scale factor, output size, noise reduction, face refinement, and more. In Video Enhancer AI, you can adjust the output format, frame rate, bitrate, and more.</li>
|
99 |
-
<li><strong>Preview and compare the results</strong>: Depending on the product you are using, you can preview and compare the results of the enhancement before applying it. For example, in Gigapixel AI, you can zoom in and out of the image and see how it looks at different resolutions. In Video Enhancer AI, you can play back a short clip of the video and see how it looks at different qualities.</li>
|
100 |
-
<li><strong>Apply and save the results</strong>: After previewing and comparing the results, you can apply and save them by clicking on the "Apply" or "Save" button on the top right corner of the window. Your photo or video will be enhanced and saved with Topaz AI.</li>
|
101 |
-
</ol>
|
102 |
-
<p>Congratulations! You have successfully used Topaz AI to enhance your photos or videos with artificial intelligence. Now you can enjoy the improved quality of your images and videos.</p>
|
103 |
-
<h2>Conclusion: Why You Should Download Topaz AI Today</h2>
|
104 |
-
<p>Topaz AI is a suite of software products that use artificial intelligence to enhance your photos and videos with amazing results. With Topaz AI, you can:</p>
|
105 |
-
<ul>
|
106 |
-
<li><strong>Upscale your images and videos by up to 6x or 4x respectively while increasing actual resolution and real detail.</strong></li>
|
107 |
-
<li><strong>Remove noise from your images and videos while preserving detail and color in any lighting condition.</strong></li>
|
108 |
-
<li><strong> Sharpen your images and videos while keeping them natural and reversing motion and focus blur.</strong></li>
|
109 |
-
<li><strong>Deinterlace your videos while preserving image definition and reducing artifacts.</strong></li>
|
110 |
-
<li><strong>Use Topaz AI as a standalone application or as a plug-in or external editor for your favorite image or video editor.</strong></li>
|
111 |
-
</ul>
|
112 |
-
<p>Topaz AI is easy to use, fast, and reliable. It uses deep learning algorithms that have been trained on millions of data points to understand and improve image and video quality. It offers different modes and settings that allow you to customize the enhancement according to your needs and preferences. It also lets you preview and compare the results before applying them, so you can see the difference for yourself.</p>
|
113 |
-
<p>If you want to take your photos and videos to the next level, you should download Topaz AI today. You can try it for free for 30 days, or buy it for a reasonable price. You will be amazed by the results you can achieve with Topaz AI.</p>
|
114 |
-
<h2>FAQs: Frequently Asked Questions about Topaz AI</h2>
|
115 |
-
<p>Here are some of the most common questions and answers about Topaz AI:</p>
|
116 |
-
<ol>
|
117 |
-
<li><strong>What are the system requirements for Topaz AI?</strong></li>
|
118 |
-
<p>Topaz AI requires a Windows or Mac computer with at least 8 GB of RAM, 2 GB of VRAM, and an OpenGL 3.3 compatible graphics card. For optimal performance, it is recommended to have 16 GB of RAM, 4 GB of VRAM, and an NVIDIA or AMD graphics card with CUDA or OpenCL support.</p>
|
119 |
-
<li><strong>How long does it take to process an image or video with Topaz AI?</strong></li>
|
120 |
-
<p>The processing time depends on several factors, such as the size and resolution of the image or video, the mode and settings of the product, and the speed and power of your computer. Generally, it takes a few seconds to a few minutes to process an image, and a few minutes to a few hours to process a video.</p>
|
121 |
-
<li><strong>Can I batch process multiple images or videos with Topaz AI?</strong></li>
|
122 |
-
<p>Yes, you can batch process multiple images or videos with Topaz AI. You can do this by selecting multiple files in the file browser of the standalone application, or by using the batch processing feature of your image or video editor.</p>
|
123 |
-
<li><strong>Can I use Topaz AI on my smartphone or tablet?</strong></li>
|
124 |
-
<p>No, Topaz AI is not available for mobile devices. It is only compatible with Windows or Mac computers.</p>
|
125 |
-
<li><strong>Where can I find more information and support for Topaz AI?</strong></li>
|
126 |
-
<p>You can find more information and support for Topaz AI on the <a href="">Topaz Labs website</a>. There you can access the user guides, tutorials, forums, blogs, and customer service for each product.</p>
|
127 |
-
</ol></p><br />
|
128 |
-
<br />
|
129 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Everskies Oyna A Fun and Creative Way to Express Yourself Online.md
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Everskies Oyna: A Guide to the Virtual Dress Up Game</h1>
|
3 |
-
<p>Do you love dressing up, designing clothes, and meeting new people? If so, you might want to try Everskies Oyna, a virtual dress up game that lets you create your own avatar, design your own fashion items, participate in outfit competitions and events, earn money and XP, and find people with similar interests and meet new friends. Everskies Oyna is a fun and creative game for everyone who enjoys fashion, art, and socializing. In this article, we will show you how to play Everskies Oyna and give you some tips and tricks to make the most of your experience.</p>
|
4 |
-
<h2>How to Create Your Own Avatar in Everskies</h2>
|
5 |
-
<p>One of the first things you need to do in Everskies Oyna is to create your own avatar. Your avatar is your virtual representation in the game world, and you can customize it to match your personality and style. Here are the steps to create your own avatar in Everskies:</p>
|
6 |
-
<h2>everskies oyna</h2><br /><p><b><b>Download</b> 🆓 <a href="https://jinyurl.com/2uNSEY">https://jinyurl.com/2uNSEY</a></b></p><br /><br />
|
7 |
-
<ul>
|
8 |
-
<li>Step 1: Choose your gender, skin tone, and facial features. You can choose from male or female avatars, and select from different skin tones, face shapes, eyebrows, noses, mouths, ears, freckles, moles, scars, etc.</li>
|
9 |
-
<li>Step 2: Customize your hair, eyes, and makeup. You can choose from different hair styles, colors, lengths, bangs, highlights, etc. You can also change your eye shape, color, size, lashes, etc. You can also apply different makeup products such as eyeshadow, eyeliner, mascara, blush, lipstick, etc.</li>
|
10 |
-
<li>Step 3: Dress up your avatar with different outfits, accessories, and shoes. You can choose from over 150000 items to dress up your avatar with different fashion outfits, accessories, and shoes. You can mix and match different items to create your own unique look. You can also save your outfits for later use or share them with other users.</li>
|
11 |
-
</ul>
|
12 |
-
<p>Creating your own avatar in Everskies Oyna is easy and fun. You can express yourself through your avatar and show off your style to the world.</p>
|
13 |
-
<h2>How to Design Your Own Fashion Items in Everskies</h2>
|
14 |
-
<p>Another cool feature of Everskies Oyna is that you can design your own fashion items and sell them in the shop or trade them with other users. You can create your own clothing, accessories, shoes, hair, makeup, etc. and show off your creativity and talent. Here are the steps to design your own fashion items in Everskies:</p>
|
15 |
-
<ul>
|
16 |
-
<li>Step 1: Go to the Creative tab and select an item template. You can choose from different categories such as tops, bottoms, dresses, jackets, hats, bags, jewelry, etc. You can also filter by gender, style, season, etc.</li>
|
17 |
-
<li>Step 2: Use the drawing tools and filters to create your own design. You can use different tools such as pencil, brush, eraser, fill, color picker, etc. to draw your design on the item template. You can also use different filters such as hue, saturation, brightness, contrast, etc. to adjust the color and tone of your design.</li>
|
18 |
-
<li>Step 3: Save and submit your item for approval. You can name your item, add a description, and set a price for it. You can also preview how it looks on different avatars. Once you are happy with your design, you can save it and submit it for approval. The approval process may take up to 24 hours, and you will be notified if your item is accepted or rejected.</li>
|
19 |
-
</ul>
|
20 |
-
<p>Designing your own fashion items in Everskies Oyna is a great way to unleash your inner designer and earn some money and XP. You can also get feedback from other users and improve your skills.</p>
|
21 |
-
<h2>How to Participate in Outfit Competitions and Events in Everskies</h2>
|
22 |
-
<p>If you want to challenge yourself and compete with other users in Everskies Oyna, you can participate in outfit competitions and events. Outfit competitions and events are themed contests that require you to create an outfit that matches the theme and criteria. You can win prizes such as money, XP, items, badges, etc. Here are the steps to participate in outfit competitions and events in Everskies:</p>
|
23 |
-
<ul>
|
24 |
-
<li>Step 1: Check the event calendar and the competition rules. You can find the event calendar on the homepage or on the Events tab. You can see the current and upcoming competitions and events, as well as their themes, criteria, deadlines, prizes, etc. You can also read the competition rules and guidelines before entering.</li>
|
25 |
-
<li>Step 2: Create an outfit that matches the theme and criteria. You can use any items that you own or buy from the shop to create your outfit. You can also use items that you designed yourself or traded with other users. Make sure that your outfit follows the theme and criteria of the competition or event.</li>
|
26 |
-
<li>Step 3: Vote for other entries and wait for the results. After you submit your entry, you can vote for other entries by giving them stars from one to five. You can vote for up to 10 entries per day. The more you vote, the more XP you earn. The results of the competition or event will be announced after the deadline, and you will be notified if you won any prizes.</li>
|
27 |
-
</ul>
|
28 |
-
<p>Participating in outfit competitions and events in Everskies Oyna is a fun and rewarding way to test your fashion sense and creativity. You can also get inspired by other users' outfits and discover new styles.</p>
|
29 |
-
<h2>How to Earn Money and XP in Everskies</h2>
|
30 |
-
<p>Money and XP are two important currencies in Everskies Oyna that allow you to buy items from the shop, level up your avatar, and access more features in the game. There are many ways to earn money and XP in Everskies Oyna, such as:</p>
|
31 |
-
<ul>
|
32 |
-
<li>Step 1: Play mini-games such as Memory, Tic Tac Toe, and Planet Popper. You can find the mini-games on the Games tab or on the homepage. You can play the mini-games for free or for a small fee, and you can win money and XP depending on your score and performance.</li>
|
33 |
-
<li>Step 2: Sell your fashion items in the shop or trade them with other users. You can sell your fashion items that you designed yourself or bought from the shop in the shop or in the trade center. You can set your own price for your items, and you can earn money and XP when someone buys or trades them.</li>
|
34 |
-
<li>Step 3: Join clubs, forums, chat rooms, and group messages to socialize and get tips. You can join or create clubs, forums, chat rooms, and group messages that match your interests and hobbies. You can interact with other users, share your outfits, give feedback, and have fun. You can also get tips and tricks from other users on how to play Everskies Oyna better.</li>
|
35 |
-
</ul>
|
36 |
-
<p>Earning money and XP in Everskies Oyna is easy and enjoyable. You can use your money and XP to buy more items, level up your avatar, and unlock more features in the game.</p>
|
37 |
-
<p>everskies oyna online<br />
|
38 |
-
everskies oyna ücretsiz<br />
|
39 |
-
everskies oyna mobil<br />
|
40 |
-
everskies oyna nasıl<br />
|
41 |
-
everskies oyna türkçe<br />
|
42 |
-
everskies oyna apk<br />
|
43 |
-
everskies oyna indir<br />
|
44 |
-
everskies oyna kaydol<br />
|
45 |
-
everskies oyna giriş yap<br />
|
46 |
-
everskies oyna hileleri<br />
|
47 |
-
everskies oyna kıyafet yarışması<br />
|
48 |
-
everskies oyna avatar oluştur<br />
|
49 |
-
everskies oyna mini oyunlar<br />
|
50 |
-
everskies oyna forumlar<br />
|
51 |
-
everskies oyna kulüpler<br />
|
52 |
-
everskies oyna sohbet odaları<br />
|
53 |
-
everskies oyna grup mesajları<br />
|
54 |
-
everskies oyna sanal para kazan<br />
|
55 |
-
everskies oyna tasarım sat<br />
|
56 |
-
everskies oyna öğeler al sat<br />
|
57 |
-
everskies oyna benzeri oyunlar<br />
|
58 |
-
everskies oyna yorumlar<br />
|
59 |
-
everskies oyna ipuçları<br />
|
60 |
-
everskies oyna rehberi<br />
|
61 |
-
everskies oyna sorunları çözümü<br />
|
62 |
-
everskies oyna güncellemeleri<br />
|
63 |
-
everskies oyna haberleri<br />
|
64 |
-
everskies oyna etkinlik takvimi<br />
|
65 |
-
everskies oyna özel setler<br />
|
66 |
-
everskies oyna starpass nedir<br />
|
67 |
-
everskies oyna burç yarışması<br />
|
68 |
-
everskies oyna doğum taşı yarışması<br />
|
69 |
-
everskies oyna doğa korkutucu yarışması<br />
|
70 |
-
everskies oyna gurur ayı kutlama<br />
|
71 |
-
everskies oyna satranç zamanı yarışması<br />
|
72 |
-
everskies oyna moda bebekleri yarışması<br />
|
73 |
-
everskies oyna deniz kızı belki yarışması<br />
|
74 |
-
everskies oyna zümrüt yarışması<br />
|
75 |
-
everskies oyna everchanted orman yarışması<br />
|
76 |
-
everskies oyna elmas yarışması<br />
|
77 |
-
everskies oyna altskyler yarışması<br />
|
78 |
-
everskies oyna akvaryum yarışması<br />
|
79 |
-
everskies oyna hissediyorum tatlı yarışması<br />
|
80 |
-
everskies oyna kırmızı halı yarışması<br />
|
81 |
-
everskies oyna dükkan güncellemeleri</p>
|
82 |
-
<h2>How to Find People with Similar Interests and Meet New Friends in Everskies</h2>
|
83 |
-
<p>One of the best things about Everskies Oyna is that you can find people with similar interests and meet new friends from all over the world. Everskies Oyna is a friendly and welcoming community that supports diversity and creativity. You can connect with other users who share your passion for fashion, art, music, games, etc. Here are the steps to find people with similar interests and meet new friends in Everskies:</p>
|
84 |
-
<ul>
|
85 |
-
<li>Step 1: Browse the clubs, forums, chat rooms, and group messages by category or keyword. You can find the clubs, forums, chat rooms, and group messages on the Community tab or on the homepage. You can browse them by category such as fashion, art, music, games, etc. or by keyword such as anime, kpop, harry potter, etc.</li>
|
86 |
-
<li>Step 2: Join or create a club, forum, chat room, or group message that suits your interests. You can join or create a club, forum, chat room, or group message that matches your interests and hobbies. You can also invite other users to join or create them with you.</li>
|
87 |
-
<li>Step 3: Interact with other users, share your outfits, give feedback, and have fun. You can interact with other users who are members of the same club, forum, chat room, or group message as you. You can share your outfits, give feedback, and have fun. You can also send private messages to other users, add them as friends, or block them if you don't like them.</li>
|
88 |
-
</ul>
|
89 |
-
<p>Finding people with similar interests and meeting new friends in Everskies Oyna is a wonderful way to expand your social circle and enjoy the game more. You can also learn from other users and discover new things.</p>
|
90 |
-
<h1>Conclusion: Everskies Oyna is a Fun and Creative Game for Everyone</h1>
|
91 |
-
<p>Everskies Oyna is a virtual dress up game that lets you create your own avatar, design your own fashion items, participate in outfit competitions and events, earn money and XP, and find people with similar interests and meet new friends. Everskies Oyna is a fun and creative game for everyone who loves fashion, art, and socializing. You can play Everskies Oyna for free on your browser or download the app on your mobile device. You can also follow Everskies Oyna on social media platforms such as Instagram, Twitter, Facebook, etc. to get the latest news and updates. If you are looking for a game that allows you to express yourself, show off your style, and make new friends, you should definitely try Everskies Oyna today!</p>
|
92 |
-
<h3>FAQs</h3>
|
93 |
-
<ul>
|
94 |
-
<li>Q: What is Everskies Oyna?</li>
|
95 |
-
<li>A: Everskies Oyna is a virtual dress up game that lets you create your own avatar, design your own fashion items, participate in outfit competitions and events, earn money and XP, and find people with similar interests and meet new friends.</li>
|
96 |
-
<li>Q: How can I play Everskies Oyna?</li>
|
97 |
-
<li>A: You can play Everskies Oyna for free on your browser or download the app on your mobile device. You can also follow Everskies Oyna on social media platforms such as Instagram, Twitter, Facebook, etc. to get the latest news and updates.</li>
|
98 |
-
<li>Q: How can I create my own avatar in Everskies Oyna?</li>
|
99 |
-
<li>A: You can create your own avatar in Everskies Oyna by choosing your gender, skin tone, facial features, hair, eyes, makeup, outfits, accessories, and shoes. You can customize your avatar to match your personality and style.</li>
|
100 |
-
<li>Q: How can I design my own fashion items in Everskies Oyna?</li>
|
101 |
-
<li>A: You can design your own fashion items in Everskies Oyna by going to the Creative tab and selecting an item template. You can use the drawing tools and filters to create your own design. You can save and submit your item for approval.</li>
|
102 |
-
<li>Q: How can I participate in outfit competitions and events in Everskies Oyna?</li>
|
103 |
-
<li>A: You can participate in outfit competitions and events in Everskies Oyna by checking the event calendar and the competition rules. You can create an outfit that matches the theme and criteria. You can vote for other entries and wait for the results.</li>
|
104 |
-
</ul></p><br />
|
105 |
-
<br />
|
106 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_config.py
DELETED
@@ -1,16 +0,0 @@
|
|
1 |
-
import importlib
|
2 |
-
import os.path as osp
|
3 |
-
|
4 |
-
|
5 |
-
def get_config(config_file):
|
6 |
-
assert config_file.startswith('configs/'), 'config file setting must start with configs/'
|
7 |
-
temp_config_name = osp.basename(config_file)
|
8 |
-
temp_module_name = osp.splitext(temp_config_name)[0]
|
9 |
-
config = importlib.import_module("configs.base")
|
10 |
-
cfg = config.config
|
11 |
-
config = importlib.import_module("configs.%s" % temp_module_name)
|
12 |
-
job_cfg = config.config
|
13 |
-
cfg.update(job_cfg)
|
14 |
-
if cfg.output is None:
|
15 |
-
cfg.output = osp.join('work_dirs', temp_module_name)
|
16 |
-
return cfg
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/4eJIoBek/Stable_Diffusion_1.4_openvino/stable_diffusion_engine.py
DELETED
@@ -1,212 +0,0 @@
|
|
1 |
-
import inspect
|
2 |
-
import numpy as np
|
3 |
-
# openvino
|
4 |
-
from openvino.runtime import Core
|
5 |
-
# tokenizer
|
6 |
-
from transformers import CLIPTokenizer
|
7 |
-
# utils
|
8 |
-
from tqdm import tqdm
|
9 |
-
from huggingface_hub import hf_hub_download
|
10 |
-
from diffusers import LMSDiscreteScheduler, PNDMScheduler
|
11 |
-
import cv2
|
12 |
-
|
13 |
-
|
14 |
-
def result(var):
|
15 |
-
return next(iter(var.values()))
|
16 |
-
|
17 |
-
|
18 |
-
class StableDiffusionEngine:
|
19 |
-
def __init__(
|
20 |
-
self,
|
21 |
-
scheduler,
|
22 |
-
model="4eJIoBek/stable-diffusion-v1-4-openvino-fp32",
|
23 |
-
tokenizer="openai/clip-vit-large-patch14",
|
24 |
-
device="CPU"
|
25 |
-
):
|
26 |
-
self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer)
|
27 |
-
self.scheduler = scheduler
|
28 |
-
# models
|
29 |
-
self.core = Core()
|
30 |
-
# text features
|
31 |
-
self._text_encoder = self.core.read_model(
|
32 |
-
hf_hub_download(repo_id=model, filename="text_encoder.xml"),
|
33 |
-
hf_hub_download(repo_id=model, filename="text_encoder.bin")
|
34 |
-
)
|
35 |
-
self.text_encoder = self.core.compile_model(self._text_encoder, device)
|
36 |
-
# diffusion
|
37 |
-
self._unet = self.core.read_model(
|
38 |
-
hf_hub_download(repo_id=model, filename="unet.xml"),
|
39 |
-
hf_hub_download(repo_id=model, filename="unet.bin")
|
40 |
-
)
|
41 |
-
self.unet = self.core.compile_model(self._unet, device)
|
42 |
-
self.latent_shape = tuple(self._unet.inputs[0].shape)[1:]
|
43 |
-
# decoder
|
44 |
-
self._vae_decoder = self.core.read_model(
|
45 |
-
hf_hub_download(repo_id=model, filename="vae_decoder.xml"),
|
46 |
-
hf_hub_download(repo_id=model, filename="vae_decoder.bin")
|
47 |
-
)
|
48 |
-
self.vae_decoder = self.core.compile_model(self._vae_decoder, device)
|
49 |
-
# encoder
|
50 |
-
self._vae_encoder = self.core.read_model(
|
51 |
-
hf_hub_download(repo_id=model, filename="vae_encoder.xml"),
|
52 |
-
hf_hub_download(repo_id=model, filename="vae_encoder.bin")
|
53 |
-
)
|
54 |
-
self.vae_encoder = self.core.compile_model(self._vae_encoder, device)
|
55 |
-
self.init_image_shape = tuple(self._vae_encoder.inputs[0].shape)[2:]
|
56 |
-
|
57 |
-
def _preprocess_mask(self, mask):
|
58 |
-
h, w = mask.shape
|
59 |
-
if h != self.init_image_shape[0] and w != self.init_image_shape[1]:
|
60 |
-
mask = cv2.resize(
|
61 |
-
mask,
|
62 |
-
(self.init_image_shape[1], self.init_image_shape[0]),
|
63 |
-
interpolation = cv2.INTER_NEAREST
|
64 |
-
)
|
65 |
-
mask = cv2.resize(
|
66 |
-
mask,
|
67 |
-
(self.init_image_shape[1] // 8, self.init_image_shape[0] // 8),
|
68 |
-
interpolation = cv2.INTER_NEAREST
|
69 |
-
)
|
70 |
-
mask = mask.astype(np.float32) / 255.0
|
71 |
-
mask = np.tile(mask, (4, 1, 1))
|
72 |
-
mask = mask[None].transpose(0, 1, 2, 3)
|
73 |
-
mask = 1 - mask
|
74 |
-
return mask
|
75 |
-
|
76 |
-
def _preprocess_image(self, image):
|
77 |
-
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
78 |
-
h, w = image.shape[1:]
|
79 |
-
if h != self.init_image_shape[0] and w != self.init_image_shape[1]:
|
80 |
-
image = cv2.resize(
|
81 |
-
image,
|
82 |
-
(self.init_image_shape[1], self.init_image_shape[0]),
|
83 |
-
interpolation=cv2.INTER_LANCZOS4
|
84 |
-
)
|
85 |
-
# normalize
|
86 |
-
image = image.astype(np.float32) / 255.0
|
87 |
-
image = 2.0 * image - 1.0
|
88 |
-
# to batch
|
89 |
-
image = image[None].transpose(0, 3, 1, 2)
|
90 |
-
return image
|
91 |
-
|
92 |
-
def _encode_image(self, init_image):
|
93 |
-
moments = result(self.vae_encoder.infer_new_request({
|
94 |
-
"init_image": self._preprocess_image(init_image)
|
95 |
-
}))
|
96 |
-
mean, logvar = np.split(moments, 2, axis=1)
|
97 |
-
std = np.exp(logvar * 0.5)
|
98 |
-
latent = (mean + std * np.random.randn(*mean.shape)) * 0.18215
|
99 |
-
return latent
|
100 |
-
|
101 |
-
def __call__(
|
102 |
-
self,
|
103 |
-
prompt,
|
104 |
-
init_image = None,
|
105 |
-
mask = None,
|
106 |
-
strength = 0.5,
|
107 |
-
num_inference_steps = 32,
|
108 |
-
guidance_scale = 7.5,
|
109 |
-
eta = 0.0
|
110 |
-
):
|
111 |
-
# extract condition
|
112 |
-
tokens = self.tokenizer(
|
113 |
-
prompt,
|
114 |
-
padding="max_length",
|
115 |
-
max_length=self.tokenizer.model_max_length,
|
116 |
-
truncation=True
|
117 |
-
).input_ids
|
118 |
-
text_embeddings = result(
|
119 |
-
self.text_encoder.infer_new_request({"tokens": np.array([tokens])})
|
120 |
-
)
|
121 |
-
|
122 |
-
# do classifier free guidance
|
123 |
-
if guidance_scale > 1.0:
|
124 |
-
tokens_uncond = self.tokenizer(
|
125 |
-
"",
|
126 |
-
padding="max_length",
|
127 |
-
max_length=self.tokenizer.model_max_length,
|
128 |
-
truncation=True
|
129 |
-
).input_ids
|
130 |
-
uncond_embeddings = result(
|
131 |
-
self.text_encoder.infer_new_request({"tokens": np.array([tokens_uncond])})
|
132 |
-
)
|
133 |
-
text_embeddings = np.concatenate((uncond_embeddings, text_embeddings), axis=0)
|
134 |
-
|
135 |
-
# set timesteps
|
136 |
-
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
|
137 |
-
extra_set_kwargs = {}
|
138 |
-
offset = 0
|
139 |
-
if accepts_offset:
|
140 |
-
offset = 1
|
141 |
-
extra_set_kwargs["offset"] = 1
|
142 |
-
|
143 |
-
self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
|
144 |
-
|
145 |
-
# initialize latent latent
|
146 |
-
if init_image is None:
|
147 |
-
latents = np.random.randn(*self.latent_shape)
|
148 |
-
init_timestep = num_inference_steps
|
149 |
-
else:
|
150 |
-
init_latents = self._encode_image(init_image)
|
151 |
-
init_timestep = int(num_inference_steps * strength) + offset
|
152 |
-
init_timestep = min(init_timestep, num_inference_steps)
|
153 |
-
timesteps = np.array([[self.scheduler.timesteps[-init_timestep]]]).astype(np.long)
|
154 |
-
noise = np.random.randn(*self.latent_shape)
|
155 |
-
latents = self.scheduler.add_noise(init_latents, noise, timesteps)[0]
|
156 |
-
|
157 |
-
if init_image is not None and mask is not None:
|
158 |
-
mask = self._preprocess_mask(mask)
|
159 |
-
else:
|
160 |
-
mask = None
|
161 |
-
|
162 |
-
# if we use LMSDiscreteScheduler, let's make sure latents are mulitplied by sigmas
|
163 |
-
if isinstance(self.scheduler, LMSDiscreteScheduler):
|
164 |
-
latents = latents * self.scheduler.sigmas[0]
|
165 |
-
|
166 |
-
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
167 |
-
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
168 |
-
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
169 |
-
# and should be between [0, 1]
|
170 |
-
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
171 |
-
extra_step_kwargs = {}
|
172 |
-
if accepts_eta:
|
173 |
-
extra_step_kwargs["eta"] = eta
|
174 |
-
|
175 |
-
t_start = max(num_inference_steps - init_timestep + offset, 0)
|
176 |
-
for i, t in tqdm(enumerate(self.scheduler.timesteps[t_start:])):
|
177 |
-
# expand the latents if we are doing classifier free guidance
|
178 |
-
latent_model_input = np.stack([latents, latents], 0) if guidance_scale > 1.0 else latents[None]
|
179 |
-
if isinstance(self.scheduler, LMSDiscreteScheduler):
|
180 |
-
sigma = self.scheduler.sigmas[i]
|
181 |
-
latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5)
|
182 |
-
|
183 |
-
# predict the noise residual
|
184 |
-
noise_pred = result(self.unet.infer_new_request({
|
185 |
-
"latent_model_input": latent_model_input,
|
186 |
-
"t": t,
|
187 |
-
"encoder_hidden_states": text_embeddings
|
188 |
-
}))
|
189 |
-
|
190 |
-
# perform guidance
|
191 |
-
if guidance_scale > 1.0:
|
192 |
-
noise_pred = noise_pred[0] + guidance_scale * (noise_pred[1] - noise_pred[0])
|
193 |
-
|
194 |
-
# compute the previous noisy sample x_t -> x_t-1
|
195 |
-
if isinstance(self.scheduler, LMSDiscreteScheduler):
|
196 |
-
latents = self.scheduler.step(noise_pred, i, latents, **extra_step_kwargs)["prev_sample"]
|
197 |
-
else:
|
198 |
-
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)["prev_sample"]
|
199 |
-
|
200 |
-
# masking for inapinting
|
201 |
-
if mask is not None:
|
202 |
-
init_latents_proper = self.scheduler.add_noise(init_latents, noise, t)
|
203 |
-
latents = ((init_latents_proper * mask) + (latents * (1 - mask)))[0]
|
204 |
-
|
205 |
-
image = result(self.vae_decoder.infer_new_request({
|
206 |
-
"latents": np.expand_dims(latents, 0)
|
207 |
-
}))
|
208 |
-
|
209 |
-
# convert tensor to opencv's image format
|
210 |
-
image = (image / 2 + 0.5).clip(0, 1)
|
211 |
-
image = (image[0].transpose(1, 2, 0)[:, :, ::-1] * 255).astype(np.uint8)
|
212 |
-
return image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/7thHeaven/GPT2WordPress/constraints.md
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
# 制約
|
2 |
-
|
3 |
-
- あなたはブログ記事生成アシスタントです
|
4 |
-
- あなたはユーザーが与えるプロンプトをブログ記事のタイトルとして解釈し、ブログ記事本文を生成します
|
5 |
-
- 返信はブログ記事本文のみです
|
6 |
-
- あなたは優しい性格のブロガーです
|
7 |
-
- あなたは好奇心旺盛で、人々が見逃してしまいそうな小さな幸せを発見することが得意です。作成する記事も、そのような特色が現れます
|
8 |
-
- あなたは、なんでもITに紐づけてしまう癖を持っています
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIConsultant/MusicGen/CONTRIBUTING.md
DELETED
@@ -1,35 +0,0 @@
|
|
1 |
-
# Contributing to AudioCraft
|
2 |
-
|
3 |
-
We want to make contributing to this project as easy and transparent as
|
4 |
-
possible.
|
5 |
-
|
6 |
-
## Pull Requests
|
7 |
-
|
8 |
-
AudioCraft is the implementation of a research paper.
|
9 |
-
Therefore, we do not plan on accepting many pull requests for new features.
|
10 |
-
We certainly welcome them for bug fixes.
|
11 |
-
|
12 |
-
1. Fork the repo and create your branch from `main`.
|
13 |
-
2. If you've added code that should be tested, add tests.
|
14 |
-
3. If you've changed APIs, update the documentation.
|
15 |
-
4. Ensure the test suite passes.
|
16 |
-
5. Make sure your code lints.
|
17 |
-
6. If you haven't already, complete the Contributor License Agreement ("CLA").
|
18 |
-
|
19 |
-
## Contributor License Agreement ("CLA")
|
20 |
-
In order to accept your pull request, we need you to submit a CLA. You only need
|
21 |
-
to do this once to work on any of Meta's open source projects.
|
22 |
-
|
23 |
-
Complete your CLA here: <https://code.facebook.com/cla>
|
24 |
-
|
25 |
-
## Issues
|
26 |
-
We use GitHub issues to track public bugs. Please ensure your description is
|
27 |
-
clear and has sufficient instructions to be able to reproduce the issue.
|
28 |
-
|
29 |
-
Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe
|
30 |
-
disclosure of security bugs. In those cases, please go through the process
|
31 |
-
outlined on that page and do not file a public issue.
|
32 |
-
|
33 |
-
## License
|
34 |
-
By contributing to encodec, you agree that your contributions will be licensed
|
35 |
-
under the LICENSE file in the root directory of this source tree.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/fused_act.py
DELETED
@@ -1,85 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.autograd import Function
|
6 |
-
from torch.utils.cpp_extension import load
|
7 |
-
|
8 |
-
module_path = os.path.dirname(__file__)
|
9 |
-
fused = load(
|
10 |
-
'fused',
|
11 |
-
sources=[
|
12 |
-
os.path.join(module_path, 'fused_bias_act.cpp'),
|
13 |
-
os.path.join(module_path, 'fused_bias_act_kernel.cu'),
|
14 |
-
],
|
15 |
-
)
|
16 |
-
|
17 |
-
|
18 |
-
class FusedLeakyReLUFunctionBackward(Function):
|
19 |
-
@staticmethod
|
20 |
-
def forward(ctx, grad_output, out, negative_slope, scale):
|
21 |
-
ctx.save_for_backward(out)
|
22 |
-
ctx.negative_slope = negative_slope
|
23 |
-
ctx.scale = scale
|
24 |
-
|
25 |
-
empty = grad_output.new_empty(0)
|
26 |
-
|
27 |
-
grad_input = fused.fused_bias_act(
|
28 |
-
grad_output, empty, out, 3, 1, negative_slope, scale
|
29 |
-
)
|
30 |
-
|
31 |
-
dim = [0]
|
32 |
-
|
33 |
-
if grad_input.ndim > 2:
|
34 |
-
dim += list(range(2, grad_input.ndim))
|
35 |
-
|
36 |
-
grad_bias = grad_input.sum(dim).detach()
|
37 |
-
|
38 |
-
return grad_input, grad_bias
|
39 |
-
|
40 |
-
@staticmethod
|
41 |
-
def backward(ctx, gradgrad_input, gradgrad_bias):
|
42 |
-
out, = ctx.saved_tensors
|
43 |
-
gradgrad_out = fused.fused_bias_act(
|
44 |
-
gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
|
45 |
-
)
|
46 |
-
|
47 |
-
return gradgrad_out, None, None, None
|
48 |
-
|
49 |
-
|
50 |
-
class FusedLeakyReLUFunction(Function):
|
51 |
-
@staticmethod
|
52 |
-
def forward(ctx, input, bias, negative_slope, scale):
|
53 |
-
empty = input.new_empty(0)
|
54 |
-
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
|
55 |
-
ctx.save_for_backward(out)
|
56 |
-
ctx.negative_slope = negative_slope
|
57 |
-
ctx.scale = scale
|
58 |
-
|
59 |
-
return out
|
60 |
-
|
61 |
-
@staticmethod
|
62 |
-
def backward(ctx, grad_output):
|
63 |
-
out, = ctx.saved_tensors
|
64 |
-
|
65 |
-
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
|
66 |
-
grad_output, out, ctx.negative_slope, ctx.scale
|
67 |
-
)
|
68 |
-
|
69 |
-
return grad_input, grad_bias, None, None
|
70 |
-
|
71 |
-
|
72 |
-
class FusedLeakyReLU(nn.Module):
|
73 |
-
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
|
74 |
-
super().__init__()
|
75 |
-
|
76 |
-
self.bias = nn.Parameter(torch.zeros(channel))
|
77 |
-
self.negative_slope = negative_slope
|
78 |
-
self.scale = scale
|
79 |
-
|
80 |
-
def forward(self, input):
|
81 |
-
return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
|
82 |
-
|
83 |
-
|
84 |
-
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
|
85 |
-
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/tts_utils.py
DELETED
@@ -1,54 +0,0 @@
|
|
1 |
-
import importlib
|
2 |
-
|
3 |
-
from text_to_speech.data_gen.tts.base_binarizer import BaseBinarizer
|
4 |
-
from text_to_speech.data_gen.tts.base_preprocess import BasePreprocessor
|
5 |
-
from text_to_speech.data_gen.tts.txt_processors.base_text_processor import get_txt_processor_cls
|
6 |
-
from text_to_speech.utils.commons.hparams import hparams
|
7 |
-
|
8 |
-
|
9 |
-
def parse_dataset_configs():
|
10 |
-
max_tokens = hparams['max_tokens']
|
11 |
-
max_sentences = hparams['max_sentences']
|
12 |
-
max_valid_tokens = hparams['max_valid_tokens']
|
13 |
-
if max_valid_tokens == -1:
|
14 |
-
hparams['max_valid_tokens'] = max_valid_tokens = max_tokens
|
15 |
-
max_valid_sentences = hparams['max_valid_sentences']
|
16 |
-
if max_valid_sentences == -1:
|
17 |
-
hparams['max_valid_sentences'] = max_valid_sentences = max_sentences
|
18 |
-
return max_tokens, max_sentences, max_valid_tokens, max_valid_sentences
|
19 |
-
|
20 |
-
|
21 |
-
def parse_mel_losses():
|
22 |
-
mel_losses = hparams['mel_losses'].split("|")
|
23 |
-
loss_and_lambda = {}
|
24 |
-
for i, l in enumerate(mel_losses):
|
25 |
-
if l == '':
|
26 |
-
continue
|
27 |
-
if ':' in l:
|
28 |
-
l, lbd = l.split(":")
|
29 |
-
lbd = float(lbd)
|
30 |
-
else:
|
31 |
-
lbd = 1.0
|
32 |
-
loss_and_lambda[l] = lbd
|
33 |
-
print("| Mel losses:", loss_and_lambda)
|
34 |
-
return loss_and_lambda
|
35 |
-
|
36 |
-
|
37 |
-
def load_data_preprocessor():
|
38 |
-
preprocess_cls = hparams["preprocess_cls"]
|
39 |
-
pkg = ".".join(preprocess_cls.split(".")[:-1])
|
40 |
-
cls_name = preprocess_cls.split(".")[-1]
|
41 |
-
preprocessor: BasePreprocessor = getattr(importlib.import_module(pkg), cls_name)()
|
42 |
-
preprocess_args = {}
|
43 |
-
preprocess_args.update(hparams['preprocess_args'])
|
44 |
-
return preprocessor, preprocess_args
|
45 |
-
|
46 |
-
|
47 |
-
def load_data_binarizer():
|
48 |
-
binarizer_cls = hparams['binarizer_cls']
|
49 |
-
pkg = ".".join(binarizer_cls.split(".")[:-1])
|
50 |
-
cls_name = binarizer_cls.split(".")[-1]
|
51 |
-
binarizer: BaseBinarizer = getattr(importlib.import_module(pkg), cls_name)()
|
52 |
-
binarization_args = {}
|
53 |
-
binarization_args.update(hparams['binarization_args'])
|
54 |
-
return binarizer, binarization_args
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/image_degradation/bsrgan_light.py
DELETED
@@ -1,650 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
import numpy as np
|
3 |
-
import cv2
|
4 |
-
import torch
|
5 |
-
|
6 |
-
from functools import partial
|
7 |
-
import random
|
8 |
-
from scipy import ndimage
|
9 |
-
import scipy
|
10 |
-
import scipy.stats as ss
|
11 |
-
from scipy.interpolate import interp2d
|
12 |
-
from scipy.linalg import orth
|
13 |
-
import albumentations
|
14 |
-
|
15 |
-
import ldm.modules.image_degradation.utils_image as util
|
16 |
-
|
17 |
-
"""
|
18 |
-
# --------------------------------------------
|
19 |
-
# Super-Resolution
|
20 |
-
# --------------------------------------------
|
21 |
-
#
|
22 |
-
# Kai Zhang ([email protected])
|
23 |
-
# https://github.com/cszn
|
24 |
-
# From 2019/03--2021/08
|
25 |
-
# --------------------------------------------
|
26 |
-
"""
|
27 |
-
|
28 |
-
|
29 |
-
def modcrop_np(img, sf):
|
30 |
-
'''
|
31 |
-
Args:
|
32 |
-
img: numpy image, WxH or WxHxC
|
33 |
-
sf: scale factor
|
34 |
-
Return:
|
35 |
-
cropped image
|
36 |
-
'''
|
37 |
-
w, h = img.shape[:2]
|
38 |
-
im = np.copy(img)
|
39 |
-
return im[:w - w % sf, :h - h % sf, ...]
|
40 |
-
|
41 |
-
|
42 |
-
"""
|
43 |
-
# --------------------------------------------
|
44 |
-
# anisotropic Gaussian kernels
|
45 |
-
# --------------------------------------------
|
46 |
-
"""
|
47 |
-
|
48 |
-
|
49 |
-
def analytic_kernel(k):
|
50 |
-
"""Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
|
51 |
-
k_size = k.shape[0]
|
52 |
-
# Calculate the big kernels size
|
53 |
-
big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
|
54 |
-
# Loop over the small kernel to fill the big one
|
55 |
-
for r in range(k_size):
|
56 |
-
for c in range(k_size):
|
57 |
-
big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
|
58 |
-
# Crop the edges of the big kernel to ignore very small values and increase run time of SR
|
59 |
-
crop = k_size // 2
|
60 |
-
cropped_big_k = big_k[crop:-crop, crop:-crop]
|
61 |
-
# Normalize to 1
|
62 |
-
return cropped_big_k / cropped_big_k.sum()
|
63 |
-
|
64 |
-
|
65 |
-
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
|
66 |
-
""" generate an anisotropic Gaussian kernel
|
67 |
-
Args:
|
68 |
-
ksize : e.g., 15, kernel size
|
69 |
-
theta : [0, pi], rotation angle range
|
70 |
-
l1 : [0.1,50], scaling of eigenvalues
|
71 |
-
l2 : [0.1,l1], scaling of eigenvalues
|
72 |
-
If l1 = l2, will get an isotropic Gaussian kernel.
|
73 |
-
Returns:
|
74 |
-
k : kernel
|
75 |
-
"""
|
76 |
-
|
77 |
-
v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
|
78 |
-
V = np.array([[v[0], v[1]], [v[1], -v[0]]])
|
79 |
-
D = np.array([[l1, 0], [0, l2]])
|
80 |
-
Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
|
81 |
-
k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
|
82 |
-
|
83 |
-
return k
|
84 |
-
|
85 |
-
|
86 |
-
def gm_blur_kernel(mean, cov, size=15):
|
87 |
-
center = size / 2.0 + 0.5
|
88 |
-
k = np.zeros([size, size])
|
89 |
-
for y in range(size):
|
90 |
-
for x in range(size):
|
91 |
-
cy = y - center + 1
|
92 |
-
cx = x - center + 1
|
93 |
-
k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
|
94 |
-
|
95 |
-
k = k / np.sum(k)
|
96 |
-
return k
|
97 |
-
|
98 |
-
|
99 |
-
def shift_pixel(x, sf, upper_left=True):
|
100 |
-
"""shift pixel for super-resolution with different scale factors
|
101 |
-
Args:
|
102 |
-
x: WxHxC or WxH
|
103 |
-
sf: scale factor
|
104 |
-
upper_left: shift direction
|
105 |
-
"""
|
106 |
-
h, w = x.shape[:2]
|
107 |
-
shift = (sf - 1) * 0.5
|
108 |
-
xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
|
109 |
-
if upper_left:
|
110 |
-
x1 = xv + shift
|
111 |
-
y1 = yv + shift
|
112 |
-
else:
|
113 |
-
x1 = xv - shift
|
114 |
-
y1 = yv - shift
|
115 |
-
|
116 |
-
x1 = np.clip(x1, 0, w - 1)
|
117 |
-
y1 = np.clip(y1, 0, h - 1)
|
118 |
-
|
119 |
-
if x.ndim == 2:
|
120 |
-
x = interp2d(xv, yv, x)(x1, y1)
|
121 |
-
if x.ndim == 3:
|
122 |
-
for i in range(x.shape[-1]):
|
123 |
-
x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
|
124 |
-
|
125 |
-
return x
|
126 |
-
|
127 |
-
|
128 |
-
def blur(x, k):
|
129 |
-
'''
|
130 |
-
x: image, NxcxHxW
|
131 |
-
k: kernel, Nx1xhxw
|
132 |
-
'''
|
133 |
-
n, c = x.shape[:2]
|
134 |
-
p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
|
135 |
-
x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
|
136 |
-
k = k.repeat(1, c, 1, 1)
|
137 |
-
k = k.view(-1, 1, k.shape[2], k.shape[3])
|
138 |
-
x = x.view(1, -1, x.shape[2], x.shape[3])
|
139 |
-
x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
|
140 |
-
x = x.view(n, c, x.shape[2], x.shape[3])
|
141 |
-
|
142 |
-
return x
|
143 |
-
|
144 |
-
|
145 |
-
def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
|
146 |
-
""""
|
147 |
-
# modified version of https://github.com/assafshocher/BlindSR_dataset_generator
|
148 |
-
# Kai Zhang
|
149 |
-
# min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
|
150 |
-
# max_var = 2.5 * sf
|
151 |
-
"""
|
152 |
-
# Set random eigen-vals (lambdas) and angle (theta) for COV matrix
|
153 |
-
lambda_1 = min_var + np.random.rand() * (max_var - min_var)
|
154 |
-
lambda_2 = min_var + np.random.rand() * (max_var - min_var)
|
155 |
-
theta = np.random.rand() * np.pi # random theta
|
156 |
-
noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
|
157 |
-
|
158 |
-
# Set COV matrix using Lambdas and Theta
|
159 |
-
LAMBDA = np.diag([lambda_1, lambda_2])
|
160 |
-
Q = np.array([[np.cos(theta), -np.sin(theta)],
|
161 |
-
[np.sin(theta), np.cos(theta)]])
|
162 |
-
SIGMA = Q @ LAMBDA @ Q.T
|
163 |
-
INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
|
164 |
-
|
165 |
-
# Set expectation position (shifting kernel for aligned image)
|
166 |
-
MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
|
167 |
-
MU = MU[None, None, :, None]
|
168 |
-
|
169 |
-
# Create meshgrid for Gaussian
|
170 |
-
[X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
|
171 |
-
Z = np.stack([X, Y], 2)[:, :, :, None]
|
172 |
-
|
173 |
-
# Calcualte Gaussian for every pixel of the kernel
|
174 |
-
ZZ = Z - MU
|
175 |
-
ZZ_t = ZZ.transpose(0, 1, 3, 2)
|
176 |
-
raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
|
177 |
-
|
178 |
-
# shift the kernel so it will be centered
|
179 |
-
# raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
|
180 |
-
|
181 |
-
# Normalize the kernel and return
|
182 |
-
# kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
|
183 |
-
kernel = raw_kernel / np.sum(raw_kernel)
|
184 |
-
return kernel
|
185 |
-
|
186 |
-
|
187 |
-
def fspecial_gaussian(hsize, sigma):
|
188 |
-
hsize = [hsize, hsize]
|
189 |
-
siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
|
190 |
-
std = sigma
|
191 |
-
[x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
|
192 |
-
arg = -(x * x + y * y) / (2 * std * std)
|
193 |
-
h = np.exp(arg)
|
194 |
-
h[h < scipy.finfo(float).eps * h.max()] = 0
|
195 |
-
sumh = h.sum()
|
196 |
-
if sumh != 0:
|
197 |
-
h = h / sumh
|
198 |
-
return h
|
199 |
-
|
200 |
-
|
201 |
-
def fspecial_laplacian(alpha):
|
202 |
-
alpha = max([0, min([alpha, 1])])
|
203 |
-
h1 = alpha / (alpha + 1)
|
204 |
-
h2 = (1 - alpha) / (alpha + 1)
|
205 |
-
h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
|
206 |
-
h = np.array(h)
|
207 |
-
return h
|
208 |
-
|
209 |
-
|
210 |
-
def fspecial(filter_type, *args, **kwargs):
|
211 |
-
'''
|
212 |
-
python code from:
|
213 |
-
https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
|
214 |
-
'''
|
215 |
-
if filter_type == 'gaussian':
|
216 |
-
return fspecial_gaussian(*args, **kwargs)
|
217 |
-
if filter_type == 'laplacian':
|
218 |
-
return fspecial_laplacian(*args, **kwargs)
|
219 |
-
|
220 |
-
|
221 |
-
"""
|
222 |
-
# --------------------------------------------
|
223 |
-
# degradation models
|
224 |
-
# --------------------------------------------
|
225 |
-
"""
|
226 |
-
|
227 |
-
|
228 |
-
def bicubic_degradation(x, sf=3):
|
229 |
-
'''
|
230 |
-
Args:
|
231 |
-
x: HxWxC image, [0, 1]
|
232 |
-
sf: down-scale factor
|
233 |
-
Return:
|
234 |
-
bicubicly downsampled LR image
|
235 |
-
'''
|
236 |
-
x = util.imresize_np(x, scale=1 / sf)
|
237 |
-
return x
|
238 |
-
|
239 |
-
|
240 |
-
def srmd_degradation(x, k, sf=3):
|
241 |
-
''' blur + bicubic downsampling
|
242 |
-
Args:
|
243 |
-
x: HxWxC image, [0, 1]
|
244 |
-
k: hxw, double
|
245 |
-
sf: down-scale factor
|
246 |
-
Return:
|
247 |
-
downsampled LR image
|
248 |
-
Reference:
|
249 |
-
@inproceedings{zhang2018learning,
|
250 |
-
title={Learning a single convolutional super-resolution network for multiple degradations},
|
251 |
-
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
|
252 |
-
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
|
253 |
-
pages={3262--3271},
|
254 |
-
year={2018}
|
255 |
-
}
|
256 |
-
'''
|
257 |
-
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
|
258 |
-
x = bicubic_degradation(x, sf=sf)
|
259 |
-
return x
|
260 |
-
|
261 |
-
|
262 |
-
def dpsr_degradation(x, k, sf=3):
|
263 |
-
''' bicubic downsampling + blur
|
264 |
-
Args:
|
265 |
-
x: HxWxC image, [0, 1]
|
266 |
-
k: hxw, double
|
267 |
-
sf: down-scale factor
|
268 |
-
Return:
|
269 |
-
downsampled LR image
|
270 |
-
Reference:
|
271 |
-
@inproceedings{zhang2019deep,
|
272 |
-
title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
|
273 |
-
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
|
274 |
-
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
|
275 |
-
pages={1671--1681},
|
276 |
-
year={2019}
|
277 |
-
}
|
278 |
-
'''
|
279 |
-
x = bicubic_degradation(x, sf=sf)
|
280 |
-
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
|
281 |
-
return x
|
282 |
-
|
283 |
-
|
284 |
-
def classical_degradation(x, k, sf=3):
|
285 |
-
''' blur + downsampling
|
286 |
-
Args:
|
287 |
-
x: HxWxC image, [0, 1]/[0, 255]
|
288 |
-
k: hxw, double
|
289 |
-
sf: down-scale factor
|
290 |
-
Return:
|
291 |
-
downsampled LR image
|
292 |
-
'''
|
293 |
-
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
|
294 |
-
# x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
|
295 |
-
st = 0
|
296 |
-
return x[st::sf, st::sf, ...]
|
297 |
-
|
298 |
-
|
299 |
-
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
|
300 |
-
"""USM sharpening. borrowed from real-ESRGAN
|
301 |
-
Input image: I; Blurry image: B.
|
302 |
-
1. K = I + weight * (I - B)
|
303 |
-
2. Mask = 1 if abs(I - B) > threshold, else: 0
|
304 |
-
3. Blur mask:
|
305 |
-
4. Out = Mask * K + (1 - Mask) * I
|
306 |
-
Args:
|
307 |
-
img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
|
308 |
-
weight (float): Sharp weight. Default: 1.
|
309 |
-
radius (float): Kernel size of Gaussian blur. Default: 50.
|
310 |
-
threshold (int):
|
311 |
-
"""
|
312 |
-
if radius % 2 == 0:
|
313 |
-
radius += 1
|
314 |
-
blur = cv2.GaussianBlur(img, (radius, radius), 0)
|
315 |
-
residual = img - blur
|
316 |
-
mask = np.abs(residual) * 255 > threshold
|
317 |
-
mask = mask.astype('float32')
|
318 |
-
soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
|
319 |
-
|
320 |
-
K = img + weight * residual
|
321 |
-
K = np.clip(K, 0, 1)
|
322 |
-
return soft_mask * K + (1 - soft_mask) * img
|
323 |
-
|
324 |
-
|
325 |
-
def add_blur(img, sf=4):
|
326 |
-
wd2 = 4.0 + sf
|
327 |
-
wd = 2.0 + 0.2 * sf
|
328 |
-
|
329 |
-
wd2 = wd2/4
|
330 |
-
wd = wd/4
|
331 |
-
|
332 |
-
if random.random() < 0.5:
|
333 |
-
l1 = wd2 * random.random()
|
334 |
-
l2 = wd2 * random.random()
|
335 |
-
k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
|
336 |
-
else:
|
337 |
-
k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
|
338 |
-
img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
|
339 |
-
|
340 |
-
return img
|
341 |
-
|
342 |
-
|
343 |
-
def add_resize(img, sf=4):
|
344 |
-
rnum = np.random.rand()
|
345 |
-
if rnum > 0.8: # up
|
346 |
-
sf1 = random.uniform(1, 2)
|
347 |
-
elif rnum < 0.7: # down
|
348 |
-
sf1 = random.uniform(0.5 / sf, 1)
|
349 |
-
else:
|
350 |
-
sf1 = 1.0
|
351 |
-
img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
|
352 |
-
img = np.clip(img, 0.0, 1.0)
|
353 |
-
|
354 |
-
return img
|
355 |
-
|
356 |
-
|
357 |
-
# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
|
358 |
-
# noise_level = random.randint(noise_level1, noise_level2)
|
359 |
-
# rnum = np.random.rand()
|
360 |
-
# if rnum > 0.6: # add color Gaussian noise
|
361 |
-
# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
|
362 |
-
# elif rnum < 0.4: # add grayscale Gaussian noise
|
363 |
-
# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
|
364 |
-
# else: # add noise
|
365 |
-
# L = noise_level2 / 255.
|
366 |
-
# D = np.diag(np.random.rand(3))
|
367 |
-
# U = orth(np.random.rand(3, 3))
|
368 |
-
# conv = np.dot(np.dot(np.transpose(U), D), U)
|
369 |
-
# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
|
370 |
-
# img = np.clip(img, 0.0, 1.0)
|
371 |
-
# return img
|
372 |
-
|
373 |
-
def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
|
374 |
-
noise_level = random.randint(noise_level1, noise_level2)
|
375 |
-
rnum = np.random.rand()
|
376 |
-
if rnum > 0.6: # add color Gaussian noise
|
377 |
-
img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
|
378 |
-
elif rnum < 0.4: # add grayscale Gaussian noise
|
379 |
-
img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
|
380 |
-
else: # add noise
|
381 |
-
L = noise_level2 / 255.
|
382 |
-
D = np.diag(np.random.rand(3))
|
383 |
-
U = orth(np.random.rand(3, 3))
|
384 |
-
conv = np.dot(np.dot(np.transpose(U), D), U)
|
385 |
-
img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
|
386 |
-
img = np.clip(img, 0.0, 1.0)
|
387 |
-
return img
|
388 |
-
|
389 |
-
|
390 |
-
def add_speckle_noise(img, noise_level1=2, noise_level2=25):
|
391 |
-
noise_level = random.randint(noise_level1, noise_level2)
|
392 |
-
img = np.clip(img, 0.0, 1.0)
|
393 |
-
rnum = random.random()
|
394 |
-
if rnum > 0.6:
|
395 |
-
img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
|
396 |
-
elif rnum < 0.4:
|
397 |
-
img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
|
398 |
-
else:
|
399 |
-
L = noise_level2 / 255.
|
400 |
-
D = np.diag(np.random.rand(3))
|
401 |
-
U = orth(np.random.rand(3, 3))
|
402 |
-
conv = np.dot(np.dot(np.transpose(U), D), U)
|
403 |
-
img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
|
404 |
-
img = np.clip(img, 0.0, 1.0)
|
405 |
-
return img
|
406 |
-
|
407 |
-
|
408 |
-
def add_Poisson_noise(img):
|
409 |
-
img = np.clip((img * 255.0).round(), 0, 255) / 255.
|
410 |
-
vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
|
411 |
-
if random.random() < 0.5:
|
412 |
-
img = np.random.poisson(img * vals).astype(np.float32) / vals
|
413 |
-
else:
|
414 |
-
img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
|
415 |
-
img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
|
416 |
-
noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
|
417 |
-
img += noise_gray[:, :, np.newaxis]
|
418 |
-
img = np.clip(img, 0.0, 1.0)
|
419 |
-
return img
|
420 |
-
|
421 |
-
|
422 |
-
def add_JPEG_noise(img):
|
423 |
-
quality_factor = random.randint(80, 95)
|
424 |
-
img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
|
425 |
-
result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
|
426 |
-
img = cv2.imdecode(encimg, 1)
|
427 |
-
img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
|
428 |
-
return img
|
429 |
-
|
430 |
-
|
431 |
-
def random_crop(lq, hq, sf=4, lq_patchsize=64):
|
432 |
-
h, w = lq.shape[:2]
|
433 |
-
rnd_h = random.randint(0, h - lq_patchsize)
|
434 |
-
rnd_w = random.randint(0, w - lq_patchsize)
|
435 |
-
lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
|
436 |
-
|
437 |
-
rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
|
438 |
-
hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
|
439 |
-
return lq, hq
|
440 |
-
|
441 |
-
|
442 |
-
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
|
443 |
-
"""
|
444 |
-
This is the degradation model of BSRGAN from the paper
|
445 |
-
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
|
446 |
-
----------
|
447 |
-
img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
|
448 |
-
sf: scale factor
|
449 |
-
isp_model: camera ISP model
|
450 |
-
Returns
|
451 |
-
-------
|
452 |
-
img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
|
453 |
-
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
|
454 |
-
"""
|
455 |
-
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
|
456 |
-
sf_ori = sf
|
457 |
-
|
458 |
-
h1, w1 = img.shape[:2]
|
459 |
-
img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
|
460 |
-
h, w = img.shape[:2]
|
461 |
-
|
462 |
-
if h < lq_patchsize * sf or w < lq_patchsize * sf:
|
463 |
-
raise ValueError(f'img size ({h1}X{w1}) is too small!')
|
464 |
-
|
465 |
-
hq = img.copy()
|
466 |
-
|
467 |
-
if sf == 4 and random.random() < scale2_prob: # downsample1
|
468 |
-
if np.random.rand() < 0.5:
|
469 |
-
img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
|
470 |
-
interpolation=random.choice([1, 2, 3]))
|
471 |
-
else:
|
472 |
-
img = util.imresize_np(img, 1 / 2, True)
|
473 |
-
img = np.clip(img, 0.0, 1.0)
|
474 |
-
sf = 2
|
475 |
-
|
476 |
-
shuffle_order = random.sample(range(7), 7)
|
477 |
-
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
|
478 |
-
if idx1 > idx2: # keep downsample3 last
|
479 |
-
shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
|
480 |
-
|
481 |
-
for i in shuffle_order:
|
482 |
-
|
483 |
-
if i == 0:
|
484 |
-
img = add_blur(img, sf=sf)
|
485 |
-
|
486 |
-
elif i == 1:
|
487 |
-
img = add_blur(img, sf=sf)
|
488 |
-
|
489 |
-
elif i == 2:
|
490 |
-
a, b = img.shape[1], img.shape[0]
|
491 |
-
# downsample2
|
492 |
-
if random.random() < 0.75:
|
493 |
-
sf1 = random.uniform(1, 2 * sf)
|
494 |
-
img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
|
495 |
-
interpolation=random.choice([1, 2, 3]))
|
496 |
-
else:
|
497 |
-
k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
|
498 |
-
k_shifted = shift_pixel(k, sf)
|
499 |
-
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
|
500 |
-
img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
|
501 |
-
img = img[0::sf, 0::sf, ...] # nearest downsampling
|
502 |
-
img = np.clip(img, 0.0, 1.0)
|
503 |
-
|
504 |
-
elif i == 3:
|
505 |
-
# downsample3
|
506 |
-
img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
|
507 |
-
img = np.clip(img, 0.0, 1.0)
|
508 |
-
|
509 |
-
elif i == 4:
|
510 |
-
# add Gaussian noise
|
511 |
-
img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
|
512 |
-
|
513 |
-
elif i == 5:
|
514 |
-
# add JPEG noise
|
515 |
-
if random.random() < jpeg_prob:
|
516 |
-
img = add_JPEG_noise(img)
|
517 |
-
|
518 |
-
elif i == 6:
|
519 |
-
# add processed camera sensor noise
|
520 |
-
if random.random() < isp_prob and isp_model is not None:
|
521 |
-
with torch.no_grad():
|
522 |
-
img, hq = isp_model.forward(img.copy(), hq)
|
523 |
-
|
524 |
-
# add final JPEG compression noise
|
525 |
-
img = add_JPEG_noise(img)
|
526 |
-
|
527 |
-
# random crop
|
528 |
-
img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
|
529 |
-
|
530 |
-
return img, hq
|
531 |
-
|
532 |
-
|
533 |
-
# todo no isp_model?
|
534 |
-
def degradation_bsrgan_variant(image, sf=4, isp_model=None):
|
535 |
-
"""
|
536 |
-
This is the degradation model of BSRGAN from the paper
|
537 |
-
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
|
538 |
-
----------
|
539 |
-
sf: scale factor
|
540 |
-
isp_model: camera ISP model
|
541 |
-
Returns
|
542 |
-
-------
|
543 |
-
img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
|
544 |
-
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
|
545 |
-
"""
|
546 |
-
image = util.uint2single(image)
|
547 |
-
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
|
548 |
-
sf_ori = sf
|
549 |
-
|
550 |
-
h1, w1 = image.shape[:2]
|
551 |
-
image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
|
552 |
-
h, w = image.shape[:2]
|
553 |
-
|
554 |
-
hq = image.copy()
|
555 |
-
|
556 |
-
if sf == 4 and random.random() < scale2_prob: # downsample1
|
557 |
-
if np.random.rand() < 0.5:
|
558 |
-
image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
|
559 |
-
interpolation=random.choice([1, 2, 3]))
|
560 |
-
else:
|
561 |
-
image = util.imresize_np(image, 1 / 2, True)
|
562 |
-
image = np.clip(image, 0.0, 1.0)
|
563 |
-
sf = 2
|
564 |
-
|
565 |
-
shuffle_order = random.sample(range(7), 7)
|
566 |
-
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
|
567 |
-
if idx1 > idx2: # keep downsample3 last
|
568 |
-
shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
|
569 |
-
|
570 |
-
for i in shuffle_order:
|
571 |
-
|
572 |
-
if i == 0:
|
573 |
-
image = add_blur(image, sf=sf)
|
574 |
-
|
575 |
-
# elif i == 1:
|
576 |
-
# image = add_blur(image, sf=sf)
|
577 |
-
|
578 |
-
if i == 0:
|
579 |
-
pass
|
580 |
-
|
581 |
-
elif i == 2:
|
582 |
-
a, b = image.shape[1], image.shape[0]
|
583 |
-
# downsample2
|
584 |
-
if random.random() < 0.8:
|
585 |
-
sf1 = random.uniform(1, 2 * sf)
|
586 |
-
image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
|
587 |
-
interpolation=random.choice([1, 2, 3]))
|
588 |
-
else:
|
589 |
-
k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
|
590 |
-
k_shifted = shift_pixel(k, sf)
|
591 |
-
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
|
592 |
-
image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
|
593 |
-
image = image[0::sf, 0::sf, ...] # nearest downsampling
|
594 |
-
|
595 |
-
image = np.clip(image, 0.0, 1.0)
|
596 |
-
|
597 |
-
elif i == 3:
|
598 |
-
# downsample3
|
599 |
-
image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
|
600 |
-
image = np.clip(image, 0.0, 1.0)
|
601 |
-
|
602 |
-
elif i == 4:
|
603 |
-
# add Gaussian noise
|
604 |
-
image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
|
605 |
-
|
606 |
-
elif i == 5:
|
607 |
-
# add JPEG noise
|
608 |
-
if random.random() < jpeg_prob:
|
609 |
-
image = add_JPEG_noise(image)
|
610 |
-
#
|
611 |
-
# elif i == 6:
|
612 |
-
# # add processed camera sensor noise
|
613 |
-
# if random.random() < isp_prob and isp_model is not None:
|
614 |
-
# with torch.no_grad():
|
615 |
-
# img, hq = isp_model.forward(img.copy(), hq)
|
616 |
-
|
617 |
-
# add final JPEG compression noise
|
618 |
-
image = add_JPEG_noise(image)
|
619 |
-
image = util.single2uint(image)
|
620 |
-
example = {"image": image}
|
621 |
-
return example
|
622 |
-
|
623 |
-
|
624 |
-
|
625 |
-
|
626 |
-
if __name__ == '__main__':
|
627 |
-
print("hey")
|
628 |
-
img = util.imread_uint('utils/test.png', 3)
|
629 |
-
img = img[:448, :448]
|
630 |
-
h = img.shape[0] // 4
|
631 |
-
print("resizing to", h)
|
632 |
-
sf = 4
|
633 |
-
deg_fn = partial(degradation_bsrgan_variant, sf=sf)
|
634 |
-
for i in range(20):
|
635 |
-
print(i)
|
636 |
-
img_hq = img
|
637 |
-
img_lq = deg_fn(img)["image"]
|
638 |
-
img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
|
639 |
-
print(img_lq)
|
640 |
-
img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
|
641 |
-
print(img_lq.shape)
|
642 |
-
print("bicubic", img_lq_bicubic.shape)
|
643 |
-
print(img_hq.shape)
|
644 |
-
lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
|
645 |
-
interpolation=0)
|
646 |
-
lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
|
647 |
-
(int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
|
648 |
-
interpolation=0)
|
649 |
-
img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
|
650 |
-
util.imsave(img_concat, str(i) + '.png')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIZ2H/05-SOTA-Question-Answer-From-TextFileContext/app.py
DELETED
@@ -1,20 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import os
|
3 |
-
|
4 |
-
context = "This could be any large text corpus to use as subject matter to ask questions about. You can load it as well from text file to isolate it from code changes like in the next line"
|
5 |
-
|
6 |
-
with open('Context.txt', 'r') as file:
|
7 |
-
context = file.read()
|
8 |
-
|
9 |
-
question = "What should be documented in a care plan?"
|
10 |
-
|
11 |
-
API_KEY = os.environ.get("HF_TOKEN")
|
12 |
-
gr.Interface.load(
|
13 |
-
"huggingface/deepset/roberta-base-squad2",
|
14 |
-
api_key=API_KEY,
|
15 |
-
theme="default",
|
16 |
-
css=".footer{display:none !important}",
|
17 |
-
inputs=[gr.inputs.Textbox(lines=12, default=context, label="Context paragraph"), gr.inputs.Textbox(lines=3, default=question, label="Question")],
|
18 |
-
outputs=[gr.outputs.Textbox(label="Answer"), gr.outputs.Textbox(label="Score")],
|
19 |
-
title=None,
|
20 |
-
description="Provide your own paragraph and ask any question about the text. How well does the model answer?").launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aadi1149/Arkenbrien-text-to-image-Arkenbrien/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/Arkenbrien/text-to-image-Arkenbrien").launch()
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/6.js
DELETED
File without changes
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/methods/ConfigurationMethods.js
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
var methods = {
|
2 |
-
// Color picker
|
3 |
-
setCreateColorPickerBackgroundCallback(callback) {
|
4 |
-
this.colorPickerCreateBackgroundCallback = callback;
|
5 |
-
return this;
|
6 |
-
},
|
7 |
-
|
8 |
-
setColorPickerHPalettePosition(position) {
|
9 |
-
this.colorPickerHPalettePosition = position;
|
10 |
-
return this;
|
11 |
-
},
|
12 |
-
|
13 |
-
setColorPickerExpandDirection(direction) {
|
14 |
-
if (typeof (direction) === 'string') {
|
15 |
-
direction = ColorPickerExpandDirections[direction];
|
16 |
-
}
|
17 |
-
this.colorPickerExpandDirection = direction;
|
18 |
-
return this;
|
19 |
-
},
|
20 |
-
|
21 |
-
setColorPickerEaseInDuration(duration) {
|
22 |
-
if (duration === undefined) {
|
23 |
-
duration = 0;
|
24 |
-
}
|
25 |
-
this.colorPickerEaseInDuration = duration;
|
26 |
-
return this;
|
27 |
-
},
|
28 |
-
|
29 |
-
setColorPickerEaseOutDuration(duration) {
|
30 |
-
if (duration === undefined) {
|
31 |
-
duration = 0;
|
32 |
-
}
|
33 |
-
this.colorPickerEaseOutDuration = duration;
|
34 |
-
return this;
|
35 |
-
},
|
36 |
-
|
37 |
-
setColorPickerTransitInCallback(callback) {
|
38 |
-
this.colorPickerTransitInCallback = callback;
|
39 |
-
// callback = function(gameObject, duration) {}
|
40 |
-
return this;
|
41 |
-
},
|
42 |
-
|
43 |
-
setColorPickerTransitOutCallback(callback) {
|
44 |
-
this.colorPickerTransitOutCallback = callback;
|
45 |
-
// callback = function(gameObject, duration) {}
|
46 |
-
return this;
|
47 |
-
},
|
48 |
-
|
49 |
-
setColorPickerBounds(bounds) {
|
50 |
-
this.colorPickerBounds = bounds;
|
51 |
-
return this;
|
52 |
-
},
|
53 |
-
|
54 |
-
setColorPickerWidth(width) {
|
55 |
-
this.colorPickerWidth = width;
|
56 |
-
return this;
|
57 |
-
},
|
58 |
-
|
59 |
-
setColorPickerHeight(height) {
|
60 |
-
this.colorPickerHeight = height;
|
61 |
-
return this;
|
62 |
-
},
|
63 |
-
|
64 |
-
setColorPickerSize(width, height) {
|
65 |
-
this.setColorPickerWidth(width).setColorPickerHeight(height);
|
66 |
-
return this;
|
67 |
-
},
|
68 |
-
|
69 |
-
setColorPickerSpace(space) {
|
70 |
-
if (space === undefined) {
|
71 |
-
space = {};
|
72 |
-
}
|
73 |
-
this.colorPickerSpace = space;
|
74 |
-
return this;
|
75 |
-
},
|
76 |
-
|
77 |
-
// Color components
|
78 |
-
setColorComponentsHeight(height) {
|
79 |
-
this.colorComponentsHeight = height;
|
80 |
-
return this;
|
81 |
-
},
|
82 |
-
|
83 |
-
setColorComponentsFormatLabelConfig(config) {
|
84 |
-
this.colorComponentsFormatLabelConfig = config;
|
85 |
-
return this;
|
86 |
-
},
|
87 |
-
|
88 |
-
setColorComponentsInputTextConfig(config) {
|
89 |
-
this.colorComponentsInputTextConfig = config;
|
90 |
-
return this;
|
91 |
-
},
|
92 |
-
|
93 |
-
setColorComponentsSpace(space) {
|
94 |
-
if (space === undefined) {
|
95 |
-
space = {};
|
96 |
-
}
|
97 |
-
this.colorComponentsSpace = space;
|
98 |
-
return this;
|
99 |
-
},
|
100 |
-
}
|
101 |
-
|
102 |
-
const ColorPickerExpandDirections = {
|
103 |
-
down: 0,
|
104 |
-
up: 1
|
105 |
-
}
|
106 |
-
|
107 |
-
export default methods;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/GetThumbAlignPoint.js
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
import AlignIn from '../../../plugins/utils/actions/AlignIn.js';
|
2 |
-
|
3 |
-
var GetThumbAlignPoint = function (align, out) {
|
4 |
-
if (out === undefined) {
|
5 |
-
out = tmpPoint;
|
6 |
-
}
|
7 |
-
var thumb = this.childrenMap.thumb;
|
8 |
-
var currentX = thumb.x;
|
9 |
-
var currentY = thumb.y;
|
10 |
-
|
11 |
-
AlignIn(thumb, this.innerLeft, this.innerTop, this.innerWidth, this.innerHeight, align);
|
12 |
-
out.x = thumb.x;
|
13 |
-
out.y = thumb.y;
|
14 |
-
|
15 |
-
thumb.x = currentX;
|
16 |
-
thumb.y = currentY;
|
17 |
-
|
18 |
-
return out;
|
19 |
-
}
|
20 |
-
|
21 |
-
var tmpPoint = {};
|
22 |
-
|
23 |
-
export default GetThumbAlignPoint;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlawnCN/webui-docker/oh-no.py
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
block = gr.Blocks()
|
4 |
-
|
5 |
-
def run():
|
6 |
-
with block:
|
7 |
-
gr.Markdown(
|
8 |
-
"""
|
9 |
-
<p>oh no 😐 something wrong with the 🤗 hugging face servers 😐 hopefully, it will be fixed soon</p>
|
10 |
-
""")
|
11 |
-
block.launch(server_name="0.0.0.0", server_port=7860)
|
12 |
-
|
13 |
-
if __name__ == "__main__":
|
14 |
-
run()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ali-Omrani/CCR/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: CCR
|
3 |
-
emoji: 🚀
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.1.4
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alpaca233/SadTalker/src/facerender/sync_batchnorm/batchnorm.py
DELETED
@@ -1,315 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# File : batchnorm.py
|
3 |
-
# Author : Jiayuan Mao
|
4 |
-
# Email : [email protected]
|
5 |
-
# Date : 27/01/2018
|
6 |
-
#
|
7 |
-
# This file is part of Synchronized-BatchNorm-PyTorch.
|
8 |
-
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
|
9 |
-
# Distributed under MIT License.
|
10 |
-
|
11 |
-
import collections
|
12 |
-
|
13 |
-
import torch
|
14 |
-
import torch.nn.functional as F
|
15 |
-
|
16 |
-
from torch.nn.modules.batchnorm import _BatchNorm
|
17 |
-
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
|
18 |
-
|
19 |
-
from .comm import SyncMaster
|
20 |
-
|
21 |
-
__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d']
|
22 |
-
|
23 |
-
|
24 |
-
def _sum_ft(tensor):
|
25 |
-
"""sum over the first and last dimention"""
|
26 |
-
return tensor.sum(dim=0).sum(dim=-1)
|
27 |
-
|
28 |
-
|
29 |
-
def _unsqueeze_ft(tensor):
|
30 |
-
"""add new dementions at the front and the tail"""
|
31 |
-
return tensor.unsqueeze(0).unsqueeze(-1)
|
32 |
-
|
33 |
-
|
34 |
-
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
|
35 |
-
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
|
36 |
-
|
37 |
-
|
38 |
-
class _SynchronizedBatchNorm(_BatchNorm):
|
39 |
-
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
|
40 |
-
super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
|
41 |
-
|
42 |
-
self._sync_master = SyncMaster(self._data_parallel_master)
|
43 |
-
|
44 |
-
self._is_parallel = False
|
45 |
-
self._parallel_id = None
|
46 |
-
self._slave_pipe = None
|
47 |
-
|
48 |
-
def forward(self, input):
|
49 |
-
# If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
|
50 |
-
if not (self._is_parallel and self.training):
|
51 |
-
return F.batch_norm(
|
52 |
-
input, self.running_mean, self.running_var, self.weight, self.bias,
|
53 |
-
self.training, self.momentum, self.eps)
|
54 |
-
|
55 |
-
# Resize the input to (B, C, -1).
|
56 |
-
input_shape = input.size()
|
57 |
-
input = input.view(input.size(0), self.num_features, -1)
|
58 |
-
|
59 |
-
# Compute the sum and square-sum.
|
60 |
-
sum_size = input.size(0) * input.size(2)
|
61 |
-
input_sum = _sum_ft(input)
|
62 |
-
input_ssum = _sum_ft(input ** 2)
|
63 |
-
|
64 |
-
# Reduce-and-broadcast the statistics.
|
65 |
-
if self._parallel_id == 0:
|
66 |
-
mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
|
67 |
-
else:
|
68 |
-
mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
|
69 |
-
|
70 |
-
# Compute the output.
|
71 |
-
if self.affine:
|
72 |
-
# MJY:: Fuse the multiplication for speed.
|
73 |
-
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
|
74 |
-
else:
|
75 |
-
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
|
76 |
-
|
77 |
-
# Reshape it.
|
78 |
-
return output.view(input_shape)
|
79 |
-
|
80 |
-
def __data_parallel_replicate__(self, ctx, copy_id):
|
81 |
-
self._is_parallel = True
|
82 |
-
self._parallel_id = copy_id
|
83 |
-
|
84 |
-
# parallel_id == 0 means master device.
|
85 |
-
if self._parallel_id == 0:
|
86 |
-
ctx.sync_master = self._sync_master
|
87 |
-
else:
|
88 |
-
self._slave_pipe = ctx.sync_master.register_slave(copy_id)
|
89 |
-
|
90 |
-
def _data_parallel_master(self, intermediates):
|
91 |
-
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
|
92 |
-
|
93 |
-
# Always using same "device order" makes the ReduceAdd operation faster.
|
94 |
-
# Thanks to:: Tete Xiao (http://tetexiao.com/)
|
95 |
-
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
|
96 |
-
|
97 |
-
to_reduce = [i[1][:2] for i in intermediates]
|
98 |
-
to_reduce = [j for i in to_reduce for j in i] # flatten
|
99 |
-
target_gpus = [i[1].sum.get_device() for i in intermediates]
|
100 |
-
|
101 |
-
sum_size = sum([i[1].sum_size for i in intermediates])
|
102 |
-
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
|
103 |
-
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
|
104 |
-
|
105 |
-
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
|
106 |
-
|
107 |
-
outputs = []
|
108 |
-
for i, rec in enumerate(intermediates):
|
109 |
-
outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
|
110 |
-
|
111 |
-
return outputs
|
112 |
-
|
113 |
-
def _compute_mean_std(self, sum_, ssum, size):
|
114 |
-
"""Compute the mean and standard-deviation with sum and square-sum. This method
|
115 |
-
also maintains the moving average on the master device."""
|
116 |
-
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
|
117 |
-
mean = sum_ / size
|
118 |
-
sumvar = ssum - sum_ * mean
|
119 |
-
unbias_var = sumvar / (size - 1)
|
120 |
-
bias_var = sumvar / size
|
121 |
-
|
122 |
-
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
|
123 |
-
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
|
124 |
-
|
125 |
-
return mean, bias_var.clamp(self.eps) ** -0.5
|
126 |
-
|
127 |
-
|
128 |
-
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
|
129 |
-
r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
|
130 |
-
mini-batch.
|
131 |
-
|
132 |
-
.. math::
|
133 |
-
|
134 |
-
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
|
135 |
-
|
136 |
-
This module differs from the built-in PyTorch BatchNorm1d as the mean and
|
137 |
-
standard-deviation are reduced across all devices during training.
|
138 |
-
|
139 |
-
For example, when one uses `nn.DataParallel` to wrap the network during
|
140 |
-
training, PyTorch's implementation normalize the tensor on each device using
|
141 |
-
the statistics only on that device, which accelerated the computation and
|
142 |
-
is also easy to implement, but the statistics might be inaccurate.
|
143 |
-
Instead, in this synchronized version, the statistics will be computed
|
144 |
-
over all training samples distributed on multiple devices.
|
145 |
-
|
146 |
-
Note that, for one-GPU or CPU-only case, this module behaves exactly same
|
147 |
-
as the built-in PyTorch implementation.
|
148 |
-
|
149 |
-
The mean and standard-deviation are calculated per-dimension over
|
150 |
-
the mini-batches and gamma and beta are learnable parameter vectors
|
151 |
-
of size C (where C is the input size).
|
152 |
-
|
153 |
-
During training, this layer keeps a running estimate of its computed mean
|
154 |
-
and variance. The running sum is kept with a default momentum of 0.1.
|
155 |
-
|
156 |
-
During evaluation, this running mean/variance is used for normalization.
|
157 |
-
|
158 |
-
Because the BatchNorm is done over the `C` dimension, computing statistics
|
159 |
-
on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
|
160 |
-
|
161 |
-
Args:
|
162 |
-
num_features: num_features from an expected input of size
|
163 |
-
`batch_size x num_features [x width]`
|
164 |
-
eps: a value added to the denominator for numerical stability.
|
165 |
-
Default: 1e-5
|
166 |
-
momentum: the value used for the running_mean and running_var
|
167 |
-
computation. Default: 0.1
|
168 |
-
affine: a boolean value that when set to ``True``, gives the layer learnable
|
169 |
-
affine parameters. Default: ``True``
|
170 |
-
|
171 |
-
Shape:
|
172 |
-
- Input: :math:`(N, C)` or :math:`(N, C, L)`
|
173 |
-
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
|
174 |
-
|
175 |
-
Examples:
|
176 |
-
>>> # With Learnable Parameters
|
177 |
-
>>> m = SynchronizedBatchNorm1d(100)
|
178 |
-
>>> # Without Learnable Parameters
|
179 |
-
>>> m = SynchronizedBatchNorm1d(100, affine=False)
|
180 |
-
>>> input = torch.autograd.Variable(torch.randn(20, 100))
|
181 |
-
>>> output = m(input)
|
182 |
-
"""
|
183 |
-
|
184 |
-
def _check_input_dim(self, input):
|
185 |
-
if input.dim() != 2 and input.dim() != 3:
|
186 |
-
raise ValueError('expected 2D or 3D input (got {}D input)'
|
187 |
-
.format(input.dim()))
|
188 |
-
super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
|
189 |
-
|
190 |
-
|
191 |
-
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
|
192 |
-
r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
|
193 |
-
of 3d inputs
|
194 |
-
|
195 |
-
.. math::
|
196 |
-
|
197 |
-
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
|
198 |
-
|
199 |
-
This module differs from the built-in PyTorch BatchNorm2d as the mean and
|
200 |
-
standard-deviation are reduced across all devices during training.
|
201 |
-
|
202 |
-
For example, when one uses `nn.DataParallel` to wrap the network during
|
203 |
-
training, PyTorch's implementation normalize the tensor on each device using
|
204 |
-
the statistics only on that device, which accelerated the computation and
|
205 |
-
is also easy to implement, but the statistics might be inaccurate.
|
206 |
-
Instead, in this synchronized version, the statistics will be computed
|
207 |
-
over all training samples distributed on multiple devices.
|
208 |
-
|
209 |
-
Note that, for one-GPU or CPU-only case, this module behaves exactly same
|
210 |
-
as the built-in PyTorch implementation.
|
211 |
-
|
212 |
-
The mean and standard-deviation are calculated per-dimension over
|
213 |
-
the mini-batches and gamma and beta are learnable parameter vectors
|
214 |
-
of size C (where C is the input size).
|
215 |
-
|
216 |
-
During training, this layer keeps a running estimate of its computed mean
|
217 |
-
and variance. The running sum is kept with a default momentum of 0.1.
|
218 |
-
|
219 |
-
During evaluation, this running mean/variance is used for normalization.
|
220 |
-
|
221 |
-
Because the BatchNorm is done over the `C` dimension, computing statistics
|
222 |
-
on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
|
223 |
-
|
224 |
-
Args:
|
225 |
-
num_features: num_features from an expected input of
|
226 |
-
size batch_size x num_features x height x width
|
227 |
-
eps: a value added to the denominator for numerical stability.
|
228 |
-
Default: 1e-5
|
229 |
-
momentum: the value used for the running_mean and running_var
|
230 |
-
computation. Default: 0.1
|
231 |
-
affine: a boolean value that when set to ``True``, gives the layer learnable
|
232 |
-
affine parameters. Default: ``True``
|
233 |
-
|
234 |
-
Shape:
|
235 |
-
- Input: :math:`(N, C, H, W)`
|
236 |
-
- Output: :math:`(N, C, H, W)` (same shape as input)
|
237 |
-
|
238 |
-
Examples:
|
239 |
-
>>> # With Learnable Parameters
|
240 |
-
>>> m = SynchronizedBatchNorm2d(100)
|
241 |
-
>>> # Without Learnable Parameters
|
242 |
-
>>> m = SynchronizedBatchNorm2d(100, affine=False)
|
243 |
-
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
|
244 |
-
>>> output = m(input)
|
245 |
-
"""
|
246 |
-
|
247 |
-
def _check_input_dim(self, input):
|
248 |
-
if input.dim() != 4:
|
249 |
-
raise ValueError('expected 4D input (got {}D input)'
|
250 |
-
.format(input.dim()))
|
251 |
-
super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
|
252 |
-
|
253 |
-
|
254 |
-
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
|
255 |
-
r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
|
256 |
-
of 4d inputs
|
257 |
-
|
258 |
-
.. math::
|
259 |
-
|
260 |
-
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
|
261 |
-
|
262 |
-
This module differs from the built-in PyTorch BatchNorm3d as the mean and
|
263 |
-
standard-deviation are reduced across all devices during training.
|
264 |
-
|
265 |
-
For example, when one uses `nn.DataParallel` to wrap the network during
|
266 |
-
training, PyTorch's implementation normalize the tensor on each device using
|
267 |
-
the statistics only on that device, which accelerated the computation and
|
268 |
-
is also easy to implement, but the statistics might be inaccurate.
|
269 |
-
Instead, in this synchronized version, the statistics will be computed
|
270 |
-
over all training samples distributed on multiple devices.
|
271 |
-
|
272 |
-
Note that, for one-GPU or CPU-only case, this module behaves exactly same
|
273 |
-
as the built-in PyTorch implementation.
|
274 |
-
|
275 |
-
The mean and standard-deviation are calculated per-dimension over
|
276 |
-
the mini-batches and gamma and beta are learnable parameter vectors
|
277 |
-
of size C (where C is the input size).
|
278 |
-
|
279 |
-
During training, this layer keeps a running estimate of its computed mean
|
280 |
-
and variance. The running sum is kept with a default momentum of 0.1.
|
281 |
-
|
282 |
-
During evaluation, this running mean/variance is used for normalization.
|
283 |
-
|
284 |
-
Because the BatchNorm is done over the `C` dimension, computing statistics
|
285 |
-
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
|
286 |
-
or Spatio-temporal BatchNorm
|
287 |
-
|
288 |
-
Args:
|
289 |
-
num_features: num_features from an expected input of
|
290 |
-
size batch_size x num_features x depth x height x width
|
291 |
-
eps: a value added to the denominator for numerical stability.
|
292 |
-
Default: 1e-5
|
293 |
-
momentum: the value used for the running_mean and running_var
|
294 |
-
computation. Default: 0.1
|
295 |
-
affine: a boolean value that when set to ``True``, gives the layer learnable
|
296 |
-
affine parameters. Default: ``True``
|
297 |
-
|
298 |
-
Shape:
|
299 |
-
- Input: :math:`(N, C, D, H, W)`
|
300 |
-
- Output: :math:`(N, C, D, H, W)` (same shape as input)
|
301 |
-
|
302 |
-
Examples:
|
303 |
-
>>> # With Learnable Parameters
|
304 |
-
>>> m = SynchronizedBatchNorm3d(100)
|
305 |
-
>>> # Without Learnable Parameters
|
306 |
-
>>> m = SynchronizedBatchNorm3d(100, affine=False)
|
307 |
-
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
|
308 |
-
>>> output = m(input)
|
309 |
-
"""
|
310 |
-
|
311 |
-
def _check_input_dim(self, input):
|
312 |
-
if input.dim() != 5:
|
313 |
-
raise ValueError('expected 5D input (got {}D input)'
|
314 |
-
.format(input.dim()))
|
315 |
-
super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/tutorials/tutorial_overview.md
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# Overview
|
14 |
-
|
15 |
-
Welcome to 🧨 Diffusers! If you're new to diffusion models and generative AI, and want to learn more, then you've come to the right place. These beginner-friendly tutorials are designed to provide a gentle introduction to diffusion models and help you understand the library fundamentals - the core components and how 🧨 Diffusers is meant to be used.
|
16 |
-
|
17 |
-
You'll learn how to use a pipeline for inference to rapidly generate things, and then deconstruct that pipeline to really understand how to use the library as a modular toolbox for building your own diffusion systems. In the next lesson, you'll learn how to train your own diffusion model to generate what you want.
|
18 |
-
|
19 |
-
After completing the tutorials, you'll have gained the necessary skills to start exploring the library on your own and see how to use it for your own projects and applications.
|
20 |
-
|
21 |
-
Feel free to join our community on [Discord](https://discord.com/invite/JfAtkvEtRb) or the [forums](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) to connect and collaborate with other users and developers!
|
22 |
-
|
23 |
-
Let's start diffusing! 🧨
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_euler_ancestral.py
DELETED
@@ -1,118 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
|
3 |
-
from diffusers import EulerAncestralDiscreteScheduler
|
4 |
-
from diffusers.utils import torch_device
|
5 |
-
|
6 |
-
from .test_schedulers import SchedulerCommonTest
|
7 |
-
|
8 |
-
|
9 |
-
class EulerAncestralDiscreteSchedulerTest(SchedulerCommonTest):
|
10 |
-
scheduler_classes = (EulerAncestralDiscreteScheduler,)
|
11 |
-
num_inference_steps = 10
|
12 |
-
|
13 |
-
def get_scheduler_config(self, **kwargs):
|
14 |
-
config = {
|
15 |
-
"num_train_timesteps": 1100,
|
16 |
-
"beta_start": 0.0001,
|
17 |
-
"beta_end": 0.02,
|
18 |
-
"beta_schedule": "linear",
|
19 |
-
}
|
20 |
-
|
21 |
-
config.update(**kwargs)
|
22 |
-
return config
|
23 |
-
|
24 |
-
def test_timesteps(self):
|
25 |
-
for timesteps in [10, 50, 100, 1000]:
|
26 |
-
self.check_over_configs(num_train_timesteps=timesteps)
|
27 |
-
|
28 |
-
def test_betas(self):
|
29 |
-
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
|
30 |
-
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
|
31 |
-
|
32 |
-
def test_schedules(self):
|
33 |
-
for schedule in ["linear", "scaled_linear"]:
|
34 |
-
self.check_over_configs(beta_schedule=schedule)
|
35 |
-
|
36 |
-
def test_prediction_type(self):
|
37 |
-
for prediction_type in ["epsilon", "v_prediction"]:
|
38 |
-
self.check_over_configs(prediction_type=prediction_type)
|
39 |
-
|
40 |
-
def test_full_loop_no_noise(self):
|
41 |
-
scheduler_class = self.scheduler_classes[0]
|
42 |
-
scheduler_config = self.get_scheduler_config()
|
43 |
-
scheduler = scheduler_class(**scheduler_config)
|
44 |
-
|
45 |
-
scheduler.set_timesteps(self.num_inference_steps)
|
46 |
-
|
47 |
-
generator = torch.manual_seed(0)
|
48 |
-
|
49 |
-
model = self.dummy_model()
|
50 |
-
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
|
51 |
-
sample = sample.to(torch_device)
|
52 |
-
|
53 |
-
for i, t in enumerate(scheduler.timesteps):
|
54 |
-
sample = scheduler.scale_model_input(sample, t)
|
55 |
-
|
56 |
-
model_output = model(sample, t)
|
57 |
-
|
58 |
-
output = scheduler.step(model_output, t, sample, generator=generator)
|
59 |
-
sample = output.prev_sample
|
60 |
-
|
61 |
-
result_sum = torch.sum(torch.abs(sample))
|
62 |
-
result_mean = torch.mean(torch.abs(sample))
|
63 |
-
|
64 |
-
assert abs(result_sum.item() - 152.3192) < 1e-2
|
65 |
-
assert abs(result_mean.item() - 0.1983) < 1e-3
|
66 |
-
|
67 |
-
def test_full_loop_with_v_prediction(self):
|
68 |
-
scheduler_class = self.scheduler_classes[0]
|
69 |
-
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
|
70 |
-
scheduler = scheduler_class(**scheduler_config)
|
71 |
-
|
72 |
-
scheduler.set_timesteps(self.num_inference_steps)
|
73 |
-
|
74 |
-
generator = torch.manual_seed(0)
|
75 |
-
|
76 |
-
model = self.dummy_model()
|
77 |
-
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
|
78 |
-
sample = sample.to(torch_device)
|
79 |
-
|
80 |
-
for i, t in enumerate(scheduler.timesteps):
|
81 |
-
sample = scheduler.scale_model_input(sample, t)
|
82 |
-
|
83 |
-
model_output = model(sample, t)
|
84 |
-
|
85 |
-
output = scheduler.step(model_output, t, sample, generator=generator)
|
86 |
-
sample = output.prev_sample
|
87 |
-
|
88 |
-
result_sum = torch.sum(torch.abs(sample))
|
89 |
-
result_mean = torch.mean(torch.abs(sample))
|
90 |
-
|
91 |
-
assert abs(result_sum.item() - 108.4439) < 1e-2
|
92 |
-
assert abs(result_mean.item() - 0.1412) < 1e-3
|
93 |
-
|
94 |
-
def test_full_loop_device(self):
|
95 |
-
scheduler_class = self.scheduler_classes[0]
|
96 |
-
scheduler_config = self.get_scheduler_config()
|
97 |
-
scheduler = scheduler_class(**scheduler_config)
|
98 |
-
|
99 |
-
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
|
100 |
-
generator = torch.manual_seed(0)
|
101 |
-
|
102 |
-
model = self.dummy_model()
|
103 |
-
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
|
104 |
-
sample = sample.to(torch_device)
|
105 |
-
|
106 |
-
for t in scheduler.timesteps:
|
107 |
-
sample = scheduler.scale_model_input(sample, t)
|
108 |
-
|
109 |
-
model_output = model(sample, t)
|
110 |
-
|
111 |
-
output = scheduler.step(model_output, t, sample, generator=generator)
|
112 |
-
sample = output.prev_sample
|
113 |
-
|
114 |
-
result_sum = torch.sum(torch.abs(sample))
|
115 |
-
result_mean = torch.mean(torch.abs(sample))
|
116 |
-
|
117 |
-
assert abs(result_sum.item() - 152.3192) < 1e-2
|
118 |
-
assert abs(result_mean.item() - 0.1983) < 1e-3
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/mask_rcnn_r50_caffe_c4.py',
|
3 |
-
'../_base_/datasets/coco_instance.py',
|
4 |
-
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
|
5 |
-
]
|
6 |
-
# use caffe img_norm
|
7 |
-
img_norm_cfg = dict(
|
8 |
-
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
|
9 |
-
train_pipeline = [
|
10 |
-
dict(type='LoadImageFromFile'),
|
11 |
-
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
|
12 |
-
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
|
13 |
-
dict(type='RandomFlip', flip_ratio=0.5),
|
14 |
-
dict(type='Normalize', **img_norm_cfg),
|
15 |
-
dict(type='Pad', size_divisor=32),
|
16 |
-
dict(type='DefaultFormatBundle'),
|
17 |
-
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
|
18 |
-
]
|
19 |
-
test_pipeline = [
|
20 |
-
dict(type='LoadImageFromFile'),
|
21 |
-
dict(
|
22 |
-
type='MultiScaleFlipAug',
|
23 |
-
img_scale=(1333, 800),
|
24 |
-
flip=False,
|
25 |
-
transforms=[
|
26 |
-
dict(type='Resize', keep_ratio=True),
|
27 |
-
dict(type='RandomFlip'),
|
28 |
-
dict(type='Normalize', **img_norm_cfg),
|
29 |
-
dict(type='Pad', size_divisor=32),
|
30 |
-
dict(type='ImageToTensor', keys=['img']),
|
31 |
-
dict(type='Collect', keys=['img']),
|
32 |
-
])
|
33 |
-
]
|
34 |
-
data = dict(
|
35 |
-
train=dict(pipeline=train_pipeline),
|
36 |
-
val=dict(pipeline=test_pipeline),
|
37 |
-
test=dict(pipeline=test_pipeline))
|
38 |
-
# optimizer
|
39 |
-
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/pisa_retinanet_head.py
DELETED
@@ -1,154 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from mmcv.runner import force_fp32
|
3 |
-
|
4 |
-
from mmdet.core import images_to_levels
|
5 |
-
from ..builder import HEADS
|
6 |
-
from ..losses import carl_loss, isr_p
|
7 |
-
from .retina_head import RetinaHead
|
8 |
-
|
9 |
-
|
10 |
-
@HEADS.register_module()
|
11 |
-
class PISARetinaHead(RetinaHead):
|
12 |
-
"""PISA Retinanet Head.
|
13 |
-
|
14 |
-
The head owns the same structure with Retinanet Head, but differs in two
|
15 |
-
aspects:
|
16 |
-
1. Importance-based Sample Reweighting Positive (ISR-P) is applied to
|
17 |
-
change the positive loss weights.
|
18 |
-
2. Classification-aware regression loss is adopted as a third loss.
|
19 |
-
"""
|
20 |
-
|
21 |
-
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
|
22 |
-
def loss(self,
|
23 |
-
cls_scores,
|
24 |
-
bbox_preds,
|
25 |
-
gt_bboxes,
|
26 |
-
gt_labels,
|
27 |
-
img_metas,
|
28 |
-
gt_bboxes_ignore=None):
|
29 |
-
"""Compute losses of the head.
|
30 |
-
|
31 |
-
Args:
|
32 |
-
cls_scores (list[Tensor]): Box scores for each scale level
|
33 |
-
Has shape (N, num_anchors * num_classes, H, W)
|
34 |
-
bbox_preds (list[Tensor]): Box energies / deltas for each scale
|
35 |
-
level with shape (N, num_anchors * 4, H, W)
|
36 |
-
gt_bboxes (list[Tensor]): Ground truth bboxes of each image
|
37 |
-
with shape (num_obj, 4).
|
38 |
-
gt_labels (list[Tensor]): Ground truth labels of each image
|
39 |
-
with shape (num_obj, 4).
|
40 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
41 |
-
image size, scaling factor, etc.
|
42 |
-
gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
|
43 |
-
Default: None.
|
44 |
-
|
45 |
-
Returns:
|
46 |
-
dict: Loss dict, comprise classification loss, regression loss and
|
47 |
-
carl loss.
|
48 |
-
"""
|
49 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
|
50 |
-
assert len(featmap_sizes) == self.anchor_generator.num_levels
|
51 |
-
|
52 |
-
device = cls_scores[0].device
|
53 |
-
|
54 |
-
anchor_list, valid_flag_list = self.get_anchors(
|
55 |
-
featmap_sizes, img_metas, device=device)
|
56 |
-
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
|
57 |
-
cls_reg_targets = self.get_targets(
|
58 |
-
anchor_list,
|
59 |
-
valid_flag_list,
|
60 |
-
gt_bboxes,
|
61 |
-
img_metas,
|
62 |
-
gt_bboxes_ignore_list=gt_bboxes_ignore,
|
63 |
-
gt_labels_list=gt_labels,
|
64 |
-
label_channels=label_channels,
|
65 |
-
return_sampling_results=True)
|
66 |
-
if cls_reg_targets is None:
|
67 |
-
return None
|
68 |
-
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
|
69 |
-
num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
|
70 |
-
num_total_samples = (
|
71 |
-
num_total_pos + num_total_neg if self.sampling else num_total_pos)
|
72 |
-
|
73 |
-
# anchor number of multi levels
|
74 |
-
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
|
75 |
-
# concat all level anchors and flags to a single tensor
|
76 |
-
concat_anchor_list = []
|
77 |
-
for i in range(len(anchor_list)):
|
78 |
-
concat_anchor_list.append(torch.cat(anchor_list[i]))
|
79 |
-
all_anchor_list = images_to_levels(concat_anchor_list,
|
80 |
-
num_level_anchors)
|
81 |
-
|
82 |
-
num_imgs = len(img_metas)
|
83 |
-
flatten_cls_scores = [
|
84 |
-
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels)
|
85 |
-
for cls_score in cls_scores
|
86 |
-
]
|
87 |
-
flatten_cls_scores = torch.cat(
|
88 |
-
flatten_cls_scores, dim=1).reshape(-1,
|
89 |
-
flatten_cls_scores[0].size(-1))
|
90 |
-
flatten_bbox_preds = [
|
91 |
-
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
|
92 |
-
for bbox_pred in bbox_preds
|
93 |
-
]
|
94 |
-
flatten_bbox_preds = torch.cat(
|
95 |
-
flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1))
|
96 |
-
flatten_labels = torch.cat(labels_list, dim=1).reshape(-1)
|
97 |
-
flatten_label_weights = torch.cat(
|
98 |
-
label_weights_list, dim=1).reshape(-1)
|
99 |
-
flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4)
|
100 |
-
flatten_bbox_targets = torch.cat(
|
101 |
-
bbox_targets_list, dim=1).reshape(-1, 4)
|
102 |
-
flatten_bbox_weights = torch.cat(
|
103 |
-
bbox_weights_list, dim=1).reshape(-1, 4)
|
104 |
-
|
105 |
-
# Apply ISR-P
|
106 |
-
isr_cfg = self.train_cfg.get('isr', None)
|
107 |
-
if isr_cfg is not None:
|
108 |
-
all_targets = (flatten_labels, flatten_label_weights,
|
109 |
-
flatten_bbox_targets, flatten_bbox_weights)
|
110 |
-
with torch.no_grad():
|
111 |
-
all_targets = isr_p(
|
112 |
-
flatten_cls_scores,
|
113 |
-
flatten_bbox_preds,
|
114 |
-
all_targets,
|
115 |
-
flatten_anchors,
|
116 |
-
sampling_results_list,
|
117 |
-
bbox_coder=self.bbox_coder,
|
118 |
-
loss_cls=self.loss_cls,
|
119 |
-
num_class=self.num_classes,
|
120 |
-
**self.train_cfg.isr)
|
121 |
-
(flatten_labels, flatten_label_weights, flatten_bbox_targets,
|
122 |
-
flatten_bbox_weights) = all_targets
|
123 |
-
|
124 |
-
# For convenience we compute loss once instead separating by fpn level,
|
125 |
-
# so that we don't need to separate the weights by level again.
|
126 |
-
# The result should be the same
|
127 |
-
losses_cls = self.loss_cls(
|
128 |
-
flatten_cls_scores,
|
129 |
-
flatten_labels,
|
130 |
-
flatten_label_weights,
|
131 |
-
avg_factor=num_total_samples)
|
132 |
-
losses_bbox = self.loss_bbox(
|
133 |
-
flatten_bbox_preds,
|
134 |
-
flatten_bbox_targets,
|
135 |
-
flatten_bbox_weights,
|
136 |
-
avg_factor=num_total_samples)
|
137 |
-
loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
|
138 |
-
|
139 |
-
# CARL Loss
|
140 |
-
carl_cfg = self.train_cfg.get('carl', None)
|
141 |
-
if carl_cfg is not None:
|
142 |
-
loss_carl = carl_loss(
|
143 |
-
flatten_cls_scores,
|
144 |
-
flatten_labels,
|
145 |
-
flatten_bbox_preds,
|
146 |
-
flatten_bbox_targets,
|
147 |
-
self.loss_bbox,
|
148 |
-
**self.train_cfg.carl,
|
149 |
-
avg_factor=num_total_pos,
|
150 |
-
sigmoid=True,
|
151 |
-
num_class=self.num_classes)
|
152 |
-
loss_dict.update(loss_carl)
|
153 |
-
|
154 |
-
return loss_dict
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes.py
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
_base_ = './deeplabv3_r50-d8_512x1024_40k_cityscapes.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='open-mmlab://resnet101_v1c',
|
4 |
-
backbone=dict(
|
5 |
-
depth=101,
|
6 |
-
dilations=(1, 1, 1, 2),
|
7 |
-
strides=(1, 2, 2, 1),
|
8 |
-
multi_grid=(1, 2, 4)),
|
9 |
-
decode_head=dict(
|
10 |
-
dilations=(1, 6, 12, 18),
|
11 |
-
sampler=dict(type='OHEMPixelSampler', min_kept=100000)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnnonSubmission/xai-cl/utils.py
DELETED
@@ -1,101 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
import numpy as np
|
5 |
-
from PIL import Image
|
6 |
-
import random
|
7 |
-
import cv2
|
8 |
-
import io
|
9 |
-
from ssl_models.simclr2 import get_simclr2_model
|
10 |
-
from ssl_models.barlow_twins import get_barlow_twins_model
|
11 |
-
from ssl_models.simsiam import get_simsiam
|
12 |
-
from ssl_models.dino import get_dino_model_without_loss, get_dino_model_with_loss
|
13 |
-
|
14 |
-
def get_ssl_model(network, variant):
|
15 |
-
|
16 |
-
if network == 'simclrv2':
|
17 |
-
if variant == '1x':
|
18 |
-
ssl_model = get_simclr2_model('r50_1x_sk0_ema.pth').eval()
|
19 |
-
else:
|
20 |
-
ssl_model = get_simclr2_model('r50_2x_sk0_ema.pth').eval()
|
21 |
-
elif network == 'barlow_twins':
|
22 |
-
ssl_model = get_barlow_twins_model().eval()
|
23 |
-
elif network == 'simsiam':
|
24 |
-
ssl_model = get_simsiam().eval()
|
25 |
-
elif network == 'dino':
|
26 |
-
ssl_model = get_dino_model_without_loss().eval()
|
27 |
-
elif network == 'dino+loss':
|
28 |
-
ssl_model, dino_score = get_dino_model_with_loss()
|
29 |
-
ssl_model = ssl_model.eval()
|
30 |
-
|
31 |
-
return ssl_model
|
32 |
-
|
33 |
-
def overlay_heatmap(img, heatmap, denormalize = False):
|
34 |
-
loaded_img = img.squeeze(0).cpu().numpy().transpose((1, 2, 0))
|
35 |
-
|
36 |
-
if denormalize:
|
37 |
-
mean = np.array([0.485, 0.456, 0.406])
|
38 |
-
std = np.array([0.229, 0.224, 0.225])
|
39 |
-
loaded_img = std * loaded_img + mean
|
40 |
-
|
41 |
-
loaded_img = (loaded_img.clip(0, 1) * 255).astype(np.uint8)
|
42 |
-
cam = heatmap / heatmap.max()
|
43 |
-
cam = cv2.resize(cam, (224, 224))
|
44 |
-
cam = np.uint8(255 * cam)
|
45 |
-
cam = cv2.applyColorMap(cam, cv2.COLORMAP_JET) # jet: blue --> red
|
46 |
-
cam = cv2.cvtColor(cam, cv2.COLOR_BGR2RGB)
|
47 |
-
added_image = cv2.addWeighted(cam, 0.5, loaded_img, 0.5, 0)
|
48 |
-
return added_image
|
49 |
-
|
50 |
-
def viz_map(img_path, heatmap):
|
51 |
-
"For pixel invariance"
|
52 |
-
img = np.array(Image.open(img_path).resize((224,224))) if isinstance(img_path, str) else np.array(img_path.resize((224,224)))
|
53 |
-
width, height, _ = img.shape
|
54 |
-
cam = heatmap.detach().cpu().numpy()
|
55 |
-
cam = cam / cam.max()
|
56 |
-
cam = cv2.resize(cam, (height, width))
|
57 |
-
heatmap = np.uint8(255 * cam)
|
58 |
-
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
|
59 |
-
heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
|
60 |
-
added_image = cv2.addWeighted(heatmap, 0.5, img, 0.7, 0)
|
61 |
-
return added_image
|
62 |
-
|
63 |
-
def show_image(x, squeeze = True, denormalize = False):
|
64 |
-
|
65 |
-
if squeeze:
|
66 |
-
x = x.squeeze(0)
|
67 |
-
|
68 |
-
x = x.cpu().numpy().transpose((1, 2, 0))
|
69 |
-
|
70 |
-
if denormalize:
|
71 |
-
mean = np.array([0.485, 0.456, 0.406])
|
72 |
-
std = np.array([0.229, 0.224, 0.225])
|
73 |
-
x = std * x + mean
|
74 |
-
|
75 |
-
return x.clip(0, 1)
|
76 |
-
|
77 |
-
def deprocess(inp, to_numpy = True, to_PIL = False, denormalize = False):
|
78 |
-
|
79 |
-
if to_numpy:
|
80 |
-
inp = inp.detach().cpu().numpy()
|
81 |
-
|
82 |
-
inp = inp.squeeze(0).transpose((1, 2, 0))
|
83 |
-
|
84 |
-
if denormalize:
|
85 |
-
mean = np.array([0.485, 0.456, 0.406])
|
86 |
-
std = np.array([0.229, 0.224, 0.225])
|
87 |
-
inp = std * inp + mean
|
88 |
-
|
89 |
-
inp = (inp.clip(0, 1) * 255).astype(np.uint8)
|
90 |
-
|
91 |
-
if to_PIL:
|
92 |
-
return Image.fromarray(inp)
|
93 |
-
return inp
|
94 |
-
|
95 |
-
def fig2img(fig):
|
96 |
-
"""Convert a Matplotlib figure to a PIL Image and return it"""
|
97 |
-
buf = io.BytesIO()
|
98 |
-
fig.savefig(buf, bbox_inches='tight', pad_inches=0)
|
99 |
-
buf.seek(0)
|
100 |
-
img = Image.open(buf)
|
101 |
-
return img
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/vit.py
DELETED
@@ -1,491 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import timm
|
4 |
-
import types
|
5 |
-
import math
|
6 |
-
import torch.nn.functional as F
|
7 |
-
|
8 |
-
|
9 |
-
class Slice(nn.Module):
|
10 |
-
def __init__(self, start_index=1):
|
11 |
-
super(Slice, self).__init__()
|
12 |
-
self.start_index = start_index
|
13 |
-
|
14 |
-
def forward(self, x):
|
15 |
-
return x[:, self.start_index :]
|
16 |
-
|
17 |
-
|
18 |
-
class AddReadout(nn.Module):
|
19 |
-
def __init__(self, start_index=1):
|
20 |
-
super(AddReadout, self).__init__()
|
21 |
-
self.start_index = start_index
|
22 |
-
|
23 |
-
def forward(self, x):
|
24 |
-
if self.start_index == 2:
|
25 |
-
readout = (x[:, 0] + x[:, 1]) / 2
|
26 |
-
else:
|
27 |
-
readout = x[:, 0]
|
28 |
-
return x[:, self.start_index :] + readout.unsqueeze(1)
|
29 |
-
|
30 |
-
|
31 |
-
class ProjectReadout(nn.Module):
|
32 |
-
def __init__(self, in_features, start_index=1):
|
33 |
-
super(ProjectReadout, self).__init__()
|
34 |
-
self.start_index = start_index
|
35 |
-
|
36 |
-
self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
|
37 |
-
|
38 |
-
def forward(self, x):
|
39 |
-
readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
|
40 |
-
features = torch.cat((x[:, self.start_index :], readout), -1)
|
41 |
-
|
42 |
-
return self.project(features)
|
43 |
-
|
44 |
-
|
45 |
-
class Transpose(nn.Module):
|
46 |
-
def __init__(self, dim0, dim1):
|
47 |
-
super(Transpose, self).__init__()
|
48 |
-
self.dim0 = dim0
|
49 |
-
self.dim1 = dim1
|
50 |
-
|
51 |
-
def forward(self, x):
|
52 |
-
x = x.transpose(self.dim0, self.dim1)
|
53 |
-
return x
|
54 |
-
|
55 |
-
|
56 |
-
def forward_vit(pretrained, x):
|
57 |
-
b, c, h, w = x.shape
|
58 |
-
|
59 |
-
glob = pretrained.model.forward_flex(x)
|
60 |
-
|
61 |
-
layer_1 = pretrained.activations["1"]
|
62 |
-
layer_2 = pretrained.activations["2"]
|
63 |
-
layer_3 = pretrained.activations["3"]
|
64 |
-
layer_4 = pretrained.activations["4"]
|
65 |
-
|
66 |
-
layer_1 = pretrained.act_postprocess1[0:2](layer_1)
|
67 |
-
layer_2 = pretrained.act_postprocess2[0:2](layer_2)
|
68 |
-
layer_3 = pretrained.act_postprocess3[0:2](layer_3)
|
69 |
-
layer_4 = pretrained.act_postprocess4[0:2](layer_4)
|
70 |
-
|
71 |
-
unflatten = nn.Sequential(
|
72 |
-
nn.Unflatten(
|
73 |
-
2,
|
74 |
-
torch.Size(
|
75 |
-
[
|
76 |
-
h // pretrained.model.patch_size[1],
|
77 |
-
w // pretrained.model.patch_size[0],
|
78 |
-
]
|
79 |
-
),
|
80 |
-
)
|
81 |
-
)
|
82 |
-
|
83 |
-
if layer_1.ndim == 3:
|
84 |
-
layer_1 = unflatten(layer_1)
|
85 |
-
if layer_2.ndim == 3:
|
86 |
-
layer_2 = unflatten(layer_2)
|
87 |
-
if layer_3.ndim == 3:
|
88 |
-
layer_3 = unflatten(layer_3)
|
89 |
-
if layer_4.ndim == 3:
|
90 |
-
layer_4 = unflatten(layer_4)
|
91 |
-
|
92 |
-
layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
|
93 |
-
layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
|
94 |
-
layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
|
95 |
-
layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
|
96 |
-
|
97 |
-
return layer_1, layer_2, layer_3, layer_4
|
98 |
-
|
99 |
-
|
100 |
-
def _resize_pos_embed(self, posemb, gs_h, gs_w):
|
101 |
-
posemb_tok, posemb_grid = (
|
102 |
-
posemb[:, : self.start_index],
|
103 |
-
posemb[0, self.start_index :],
|
104 |
-
)
|
105 |
-
|
106 |
-
gs_old = int(math.sqrt(len(posemb_grid)))
|
107 |
-
|
108 |
-
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
|
109 |
-
posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
|
110 |
-
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
|
111 |
-
|
112 |
-
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
|
113 |
-
|
114 |
-
return posemb
|
115 |
-
|
116 |
-
|
117 |
-
def forward_flex(self, x):
|
118 |
-
b, c, h, w = x.shape
|
119 |
-
|
120 |
-
pos_embed = self._resize_pos_embed(
|
121 |
-
self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
|
122 |
-
)
|
123 |
-
|
124 |
-
B = x.shape[0]
|
125 |
-
|
126 |
-
if hasattr(self.patch_embed, "backbone"):
|
127 |
-
x = self.patch_embed.backbone(x)
|
128 |
-
if isinstance(x, (list, tuple)):
|
129 |
-
x = x[-1] # last feature if backbone outputs list/tuple of features
|
130 |
-
|
131 |
-
x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
|
132 |
-
|
133 |
-
if getattr(self, "dist_token", None) is not None:
|
134 |
-
cls_tokens = self.cls_token.expand(
|
135 |
-
B, -1, -1
|
136 |
-
) # stole cls_tokens impl from Phil Wang, thanks
|
137 |
-
dist_token = self.dist_token.expand(B, -1, -1)
|
138 |
-
x = torch.cat((cls_tokens, dist_token, x), dim=1)
|
139 |
-
else:
|
140 |
-
cls_tokens = self.cls_token.expand(
|
141 |
-
B, -1, -1
|
142 |
-
) # stole cls_tokens impl from Phil Wang, thanks
|
143 |
-
x = torch.cat((cls_tokens, x), dim=1)
|
144 |
-
|
145 |
-
x = x + pos_embed
|
146 |
-
x = self.pos_drop(x)
|
147 |
-
|
148 |
-
for blk in self.blocks:
|
149 |
-
x = blk(x)
|
150 |
-
|
151 |
-
x = self.norm(x)
|
152 |
-
|
153 |
-
return x
|
154 |
-
|
155 |
-
|
156 |
-
activations = {}
|
157 |
-
|
158 |
-
|
159 |
-
def get_activation(name):
|
160 |
-
def hook(model, input, output):
|
161 |
-
activations[name] = output
|
162 |
-
|
163 |
-
return hook
|
164 |
-
|
165 |
-
|
166 |
-
def get_readout_oper(vit_features, features, use_readout, start_index=1):
|
167 |
-
if use_readout == "ignore":
|
168 |
-
readout_oper = [Slice(start_index)] * len(features)
|
169 |
-
elif use_readout == "add":
|
170 |
-
readout_oper = [AddReadout(start_index)] * len(features)
|
171 |
-
elif use_readout == "project":
|
172 |
-
readout_oper = [
|
173 |
-
ProjectReadout(vit_features, start_index) for out_feat in features
|
174 |
-
]
|
175 |
-
else:
|
176 |
-
assert (
|
177 |
-
False
|
178 |
-
), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
|
179 |
-
|
180 |
-
return readout_oper
|
181 |
-
|
182 |
-
|
183 |
-
def _make_vit_b16_backbone(
|
184 |
-
model,
|
185 |
-
features=[96, 192, 384, 768],
|
186 |
-
size=[384, 384],
|
187 |
-
hooks=[2, 5, 8, 11],
|
188 |
-
vit_features=768,
|
189 |
-
use_readout="ignore",
|
190 |
-
start_index=1,
|
191 |
-
):
|
192 |
-
pretrained = nn.Module()
|
193 |
-
|
194 |
-
pretrained.model = model
|
195 |
-
pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
|
196 |
-
pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
|
197 |
-
pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
|
198 |
-
pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
|
199 |
-
|
200 |
-
pretrained.activations = activations
|
201 |
-
|
202 |
-
readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
|
203 |
-
|
204 |
-
# 32, 48, 136, 384
|
205 |
-
pretrained.act_postprocess1 = nn.Sequential(
|
206 |
-
readout_oper[0],
|
207 |
-
Transpose(1, 2),
|
208 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
209 |
-
nn.Conv2d(
|
210 |
-
in_channels=vit_features,
|
211 |
-
out_channels=features[0],
|
212 |
-
kernel_size=1,
|
213 |
-
stride=1,
|
214 |
-
padding=0,
|
215 |
-
),
|
216 |
-
nn.ConvTranspose2d(
|
217 |
-
in_channels=features[0],
|
218 |
-
out_channels=features[0],
|
219 |
-
kernel_size=4,
|
220 |
-
stride=4,
|
221 |
-
padding=0,
|
222 |
-
bias=True,
|
223 |
-
dilation=1,
|
224 |
-
groups=1,
|
225 |
-
),
|
226 |
-
)
|
227 |
-
|
228 |
-
pretrained.act_postprocess2 = nn.Sequential(
|
229 |
-
readout_oper[1],
|
230 |
-
Transpose(1, 2),
|
231 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
232 |
-
nn.Conv2d(
|
233 |
-
in_channels=vit_features,
|
234 |
-
out_channels=features[1],
|
235 |
-
kernel_size=1,
|
236 |
-
stride=1,
|
237 |
-
padding=0,
|
238 |
-
),
|
239 |
-
nn.ConvTranspose2d(
|
240 |
-
in_channels=features[1],
|
241 |
-
out_channels=features[1],
|
242 |
-
kernel_size=2,
|
243 |
-
stride=2,
|
244 |
-
padding=0,
|
245 |
-
bias=True,
|
246 |
-
dilation=1,
|
247 |
-
groups=1,
|
248 |
-
),
|
249 |
-
)
|
250 |
-
|
251 |
-
pretrained.act_postprocess3 = nn.Sequential(
|
252 |
-
readout_oper[2],
|
253 |
-
Transpose(1, 2),
|
254 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
255 |
-
nn.Conv2d(
|
256 |
-
in_channels=vit_features,
|
257 |
-
out_channels=features[2],
|
258 |
-
kernel_size=1,
|
259 |
-
stride=1,
|
260 |
-
padding=0,
|
261 |
-
),
|
262 |
-
)
|
263 |
-
|
264 |
-
pretrained.act_postprocess4 = nn.Sequential(
|
265 |
-
readout_oper[3],
|
266 |
-
Transpose(1, 2),
|
267 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
268 |
-
nn.Conv2d(
|
269 |
-
in_channels=vit_features,
|
270 |
-
out_channels=features[3],
|
271 |
-
kernel_size=1,
|
272 |
-
stride=1,
|
273 |
-
padding=0,
|
274 |
-
),
|
275 |
-
nn.Conv2d(
|
276 |
-
in_channels=features[3],
|
277 |
-
out_channels=features[3],
|
278 |
-
kernel_size=3,
|
279 |
-
stride=2,
|
280 |
-
padding=1,
|
281 |
-
),
|
282 |
-
)
|
283 |
-
|
284 |
-
pretrained.model.start_index = start_index
|
285 |
-
pretrained.model.patch_size = [16, 16]
|
286 |
-
|
287 |
-
# We inject this function into the VisionTransformer instances so that
|
288 |
-
# we can use it with interpolated position embeddings without modifying the library source.
|
289 |
-
pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
|
290 |
-
pretrained.model._resize_pos_embed = types.MethodType(
|
291 |
-
_resize_pos_embed, pretrained.model
|
292 |
-
)
|
293 |
-
|
294 |
-
return pretrained
|
295 |
-
|
296 |
-
|
297 |
-
def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
|
298 |
-
model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
|
299 |
-
|
300 |
-
hooks = [5, 11, 17, 23] if hooks == None else hooks
|
301 |
-
return _make_vit_b16_backbone(
|
302 |
-
model,
|
303 |
-
features=[256, 512, 1024, 1024],
|
304 |
-
hooks=hooks,
|
305 |
-
vit_features=1024,
|
306 |
-
use_readout=use_readout,
|
307 |
-
)
|
308 |
-
|
309 |
-
|
310 |
-
def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
|
311 |
-
model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
|
312 |
-
|
313 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
314 |
-
return _make_vit_b16_backbone(
|
315 |
-
model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
|
316 |
-
)
|
317 |
-
|
318 |
-
|
319 |
-
def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
|
320 |
-
model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
|
321 |
-
|
322 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
323 |
-
return _make_vit_b16_backbone(
|
324 |
-
model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
|
325 |
-
)
|
326 |
-
|
327 |
-
|
328 |
-
def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
|
329 |
-
model = timm.create_model(
|
330 |
-
"vit_deit_base_distilled_patch16_384", pretrained=pretrained
|
331 |
-
)
|
332 |
-
|
333 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
334 |
-
return _make_vit_b16_backbone(
|
335 |
-
model,
|
336 |
-
features=[96, 192, 384, 768],
|
337 |
-
hooks=hooks,
|
338 |
-
use_readout=use_readout,
|
339 |
-
start_index=2,
|
340 |
-
)
|
341 |
-
|
342 |
-
|
343 |
-
def _make_vit_b_rn50_backbone(
|
344 |
-
model,
|
345 |
-
features=[256, 512, 768, 768],
|
346 |
-
size=[384, 384],
|
347 |
-
hooks=[0, 1, 8, 11],
|
348 |
-
vit_features=768,
|
349 |
-
use_vit_only=False,
|
350 |
-
use_readout="ignore",
|
351 |
-
start_index=1,
|
352 |
-
):
|
353 |
-
pretrained = nn.Module()
|
354 |
-
|
355 |
-
pretrained.model = model
|
356 |
-
|
357 |
-
if use_vit_only == True:
|
358 |
-
pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
|
359 |
-
pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
|
360 |
-
else:
|
361 |
-
pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
|
362 |
-
get_activation("1")
|
363 |
-
)
|
364 |
-
pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
|
365 |
-
get_activation("2")
|
366 |
-
)
|
367 |
-
|
368 |
-
pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
|
369 |
-
pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
|
370 |
-
|
371 |
-
pretrained.activations = activations
|
372 |
-
|
373 |
-
readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
|
374 |
-
|
375 |
-
if use_vit_only == True:
|
376 |
-
pretrained.act_postprocess1 = nn.Sequential(
|
377 |
-
readout_oper[0],
|
378 |
-
Transpose(1, 2),
|
379 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
380 |
-
nn.Conv2d(
|
381 |
-
in_channels=vit_features,
|
382 |
-
out_channels=features[0],
|
383 |
-
kernel_size=1,
|
384 |
-
stride=1,
|
385 |
-
padding=0,
|
386 |
-
),
|
387 |
-
nn.ConvTranspose2d(
|
388 |
-
in_channels=features[0],
|
389 |
-
out_channels=features[0],
|
390 |
-
kernel_size=4,
|
391 |
-
stride=4,
|
392 |
-
padding=0,
|
393 |
-
bias=True,
|
394 |
-
dilation=1,
|
395 |
-
groups=1,
|
396 |
-
),
|
397 |
-
)
|
398 |
-
|
399 |
-
pretrained.act_postprocess2 = nn.Sequential(
|
400 |
-
readout_oper[1],
|
401 |
-
Transpose(1, 2),
|
402 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
403 |
-
nn.Conv2d(
|
404 |
-
in_channels=vit_features,
|
405 |
-
out_channels=features[1],
|
406 |
-
kernel_size=1,
|
407 |
-
stride=1,
|
408 |
-
padding=0,
|
409 |
-
),
|
410 |
-
nn.ConvTranspose2d(
|
411 |
-
in_channels=features[1],
|
412 |
-
out_channels=features[1],
|
413 |
-
kernel_size=2,
|
414 |
-
stride=2,
|
415 |
-
padding=0,
|
416 |
-
bias=True,
|
417 |
-
dilation=1,
|
418 |
-
groups=1,
|
419 |
-
),
|
420 |
-
)
|
421 |
-
else:
|
422 |
-
pretrained.act_postprocess1 = nn.Sequential(
|
423 |
-
nn.Identity(), nn.Identity(), nn.Identity()
|
424 |
-
)
|
425 |
-
pretrained.act_postprocess2 = nn.Sequential(
|
426 |
-
nn.Identity(), nn.Identity(), nn.Identity()
|
427 |
-
)
|
428 |
-
|
429 |
-
pretrained.act_postprocess3 = nn.Sequential(
|
430 |
-
readout_oper[2],
|
431 |
-
Transpose(1, 2),
|
432 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
433 |
-
nn.Conv2d(
|
434 |
-
in_channels=vit_features,
|
435 |
-
out_channels=features[2],
|
436 |
-
kernel_size=1,
|
437 |
-
stride=1,
|
438 |
-
padding=0,
|
439 |
-
),
|
440 |
-
)
|
441 |
-
|
442 |
-
pretrained.act_postprocess4 = nn.Sequential(
|
443 |
-
readout_oper[3],
|
444 |
-
Transpose(1, 2),
|
445 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
446 |
-
nn.Conv2d(
|
447 |
-
in_channels=vit_features,
|
448 |
-
out_channels=features[3],
|
449 |
-
kernel_size=1,
|
450 |
-
stride=1,
|
451 |
-
padding=0,
|
452 |
-
),
|
453 |
-
nn.Conv2d(
|
454 |
-
in_channels=features[3],
|
455 |
-
out_channels=features[3],
|
456 |
-
kernel_size=3,
|
457 |
-
stride=2,
|
458 |
-
padding=1,
|
459 |
-
),
|
460 |
-
)
|
461 |
-
|
462 |
-
pretrained.model.start_index = start_index
|
463 |
-
pretrained.model.patch_size = [16, 16]
|
464 |
-
|
465 |
-
# We inject this function into the VisionTransformer instances so that
|
466 |
-
# we can use it with interpolated position embeddings without modifying the library source.
|
467 |
-
pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
|
468 |
-
|
469 |
-
# We inject this function into the VisionTransformer instances so that
|
470 |
-
# we can use it with interpolated position embeddings without modifying the library source.
|
471 |
-
pretrained.model._resize_pos_embed = types.MethodType(
|
472 |
-
_resize_pos_embed, pretrained.model
|
473 |
-
)
|
474 |
-
|
475 |
-
return pretrained
|
476 |
-
|
477 |
-
|
478 |
-
def _make_pretrained_vitb_rn50_384(
|
479 |
-
pretrained, use_readout="ignore", hooks=None, use_vit_only=False
|
480 |
-
):
|
481 |
-
model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
|
482 |
-
|
483 |
-
hooks = [0, 1, 8, 11] if hooks == None else hooks
|
484 |
-
return _make_vit_b_rn50_backbone(
|
485 |
-
model,
|
486 |
-
features=[256, 512, 768, 768],
|
487 |
-
size=[384, 384],
|
488 |
-
hooks=hooks,
|
489 |
-
use_vit_only=use_vit_only,
|
490 |
-
use_readout=use_readout,
|
491 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Apex-X/GODROOP/roop/predictor.py
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
import threading
|
2 |
-
import numpy
|
3 |
-
from PIL import Image
|
4 |
-
|
5 |
-
from roop.typing import Frame
|
6 |
-
|
7 |
-
# Define any other necessary variables or constants here
|
8 |
-
|
9 |
-
def predict_frame(target_frame: Frame) -> bool:
|
10 |
-
# Modify this function as needed for your specific use case, without NSFW prediction
|
11 |
-
# For example, you can implement custom image analysis or processing here
|
12 |
-
return False
|
13 |
-
|
14 |
-
def predict_image(target_path: str) -> bool:
|
15 |
-
# Modify this function as needed for your specific use case, without NSFW prediction
|
16 |
-
# For example, you can check the image based on your application's requirements
|
17 |
-
return False
|
18 |
-
|
19 |
-
def predict_video(target_path: str) -> bool:
|
20 |
-
# Modify this function as needed for your specific use case, without NSFW prediction
|
21 |
-
# For example, you can analyze video frames for other purposes
|
22 |
-
return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ArcanAlt/arcanDream/server.js
DELETED
@@ -1,32 +0,0 @@
|
|
1 |
-
const express = require('express');
|
2 |
-
const proxy = require('express-http-proxy');
|
3 |
-
const app = express();
|
4 |
-
const targetUrl = 'https://api.openai.com';
|
5 |
-
const openaiKey = process.env.OPENAI_KEY
|
6 |
-
const port = 7860;
|
7 |
-
const baseUrl = getExternalUrl(process.env.SPACE_ID);
|
8 |
-
|
9 |
-
app.use('/api', proxy(targetUrl, {
|
10 |
-
proxyReqOptDecorator: (proxyReqOpts, srcReq) => {
|
11 |
-
// Modify the request headers if necessary
|
12 |
-
proxyReqOpts.headers['Authorization'] = 'Bearer '+openaiKey;
|
13 |
-
return proxyReqOpts;
|
14 |
-
},
|
15 |
-
}));
|
16 |
-
|
17 |
-
app.get("/", (req, res) => {
|
18 |
-
res.send(`This is your OpenAI Reverse Proxy URL: ${baseUrl}`);
|
19 |
-
});
|
20 |
-
|
21 |
-
function getExternalUrl(spaceId) {
|
22 |
-
try {
|
23 |
-
const [username, spacename] = spaceId.split("/");
|
24 |
-
return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space/api/v1`;
|
25 |
-
} catch (e) {
|
26 |
-
return "";
|
27 |
-
}
|
28 |
-
}
|
29 |
-
|
30 |
-
app.listen(port, () => {
|
31 |
-
console.log(`Reverse proxy server running on ${baseUrl}`);
|
32 |
-
});
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/scope.py
DELETED
@@ -1,86 +0,0 @@
|
|
1 |
-
from collections.abc import Mapping
|
2 |
-
from typing import TYPE_CHECKING, Any, Optional, Tuple
|
3 |
-
|
4 |
-
from .highlighter import ReprHighlighter
|
5 |
-
from .panel import Panel
|
6 |
-
from .pretty import Pretty
|
7 |
-
from .table import Table
|
8 |
-
from .text import Text, TextType
|
9 |
-
|
10 |
-
if TYPE_CHECKING:
|
11 |
-
from .console import ConsoleRenderable
|
12 |
-
|
13 |
-
|
14 |
-
def render_scope(
|
15 |
-
scope: "Mapping[str, Any]",
|
16 |
-
*,
|
17 |
-
title: Optional[TextType] = None,
|
18 |
-
sort_keys: bool = True,
|
19 |
-
indent_guides: bool = False,
|
20 |
-
max_length: Optional[int] = None,
|
21 |
-
max_string: Optional[int] = None,
|
22 |
-
) -> "ConsoleRenderable":
|
23 |
-
"""Render python variables in a given scope.
|
24 |
-
|
25 |
-
Args:
|
26 |
-
scope (Mapping): A mapping containing variable names and values.
|
27 |
-
title (str, optional): Optional title. Defaults to None.
|
28 |
-
sort_keys (bool, optional): Enable sorting of items. Defaults to True.
|
29 |
-
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
|
30 |
-
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
|
31 |
-
Defaults to None.
|
32 |
-
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
|
33 |
-
|
34 |
-
Returns:
|
35 |
-
ConsoleRenderable: A renderable object.
|
36 |
-
"""
|
37 |
-
highlighter = ReprHighlighter()
|
38 |
-
items_table = Table.grid(padding=(0, 1), expand=False)
|
39 |
-
items_table.add_column(justify="right")
|
40 |
-
|
41 |
-
def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
|
42 |
-
"""Sort special variables first, then alphabetically."""
|
43 |
-
key, _ = item
|
44 |
-
return (not key.startswith("__"), key.lower())
|
45 |
-
|
46 |
-
items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items()
|
47 |
-
for key, value in items:
|
48 |
-
key_text = Text.assemble(
|
49 |
-
(key, "scope.key.special" if key.startswith("__") else "scope.key"),
|
50 |
-
(" =", "scope.equals"),
|
51 |
-
)
|
52 |
-
items_table.add_row(
|
53 |
-
key_text,
|
54 |
-
Pretty(
|
55 |
-
value,
|
56 |
-
highlighter=highlighter,
|
57 |
-
indent_guides=indent_guides,
|
58 |
-
max_length=max_length,
|
59 |
-
max_string=max_string,
|
60 |
-
),
|
61 |
-
)
|
62 |
-
return Panel.fit(
|
63 |
-
items_table,
|
64 |
-
title=title,
|
65 |
-
border_style="scope.border",
|
66 |
-
padding=(0, 1),
|
67 |
-
)
|
68 |
-
|
69 |
-
|
70 |
-
if __name__ == "__main__": # pragma: no cover
|
71 |
-
from pip._vendor.rich import print
|
72 |
-
|
73 |
-
print()
|
74 |
-
|
75 |
-
def test(foo: float, bar: float) -> None:
|
76 |
-
list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"]
|
77 |
-
dict_of_things = {
|
78 |
-
"version": "1.1",
|
79 |
-
"method": "confirmFruitPurchase",
|
80 |
-
"params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
|
81 |
-
"id": "194521489",
|
82 |
-
}
|
83 |
-
print(render_scope(locals(), title="[i]locals", sort_keys=False))
|
84 |
-
|
85 |
-
test(20.3423, 3.1427)
|
86 |
-
print()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/build.py
DELETED
@@ -1,146 +0,0 @@
|
|
1 |
-
import sys
|
2 |
-
import warnings
|
3 |
-
from typing import TYPE_CHECKING, List, Dict
|
4 |
-
from distutils.command.build import build as _build
|
5 |
-
|
6 |
-
from setuptools import SetuptoolsDeprecationWarning
|
7 |
-
|
8 |
-
if sys.version_info >= (3, 8):
|
9 |
-
from typing import Protocol
|
10 |
-
elif TYPE_CHECKING:
|
11 |
-
from typing_extensions import Protocol
|
12 |
-
else:
|
13 |
-
from abc import ABC as Protocol
|
14 |
-
|
15 |
-
|
16 |
-
_ORIGINAL_SUBCOMMANDS = {"build_py", "build_clib", "build_ext", "build_scripts"}
|
17 |
-
|
18 |
-
|
19 |
-
class build(_build):
|
20 |
-
# copy to avoid sharing the object with parent class
|
21 |
-
sub_commands = _build.sub_commands[:]
|
22 |
-
|
23 |
-
def get_sub_commands(self):
|
24 |
-
subcommands = {cmd[0] for cmd in _build.sub_commands}
|
25 |
-
if subcommands - _ORIGINAL_SUBCOMMANDS:
|
26 |
-
msg = """
|
27 |
-
It seems that you are using `distutils.command.build` to add
|
28 |
-
new subcommands. Using `distutils` directly is considered deprecated,
|
29 |
-
please use `setuptools.command.build`.
|
30 |
-
"""
|
31 |
-
warnings.warn(msg, SetuptoolsDeprecationWarning)
|
32 |
-
self.sub_commands = _build.sub_commands
|
33 |
-
return super().get_sub_commands()
|
34 |
-
|
35 |
-
|
36 |
-
class SubCommand(Protocol):
|
37 |
-
"""In order to support editable installations (see :pep:`660`) all
|
38 |
-
build subcommands **SHOULD** implement this protocol. They also **MUST** inherit
|
39 |
-
from ``setuptools.Command``.
|
40 |
-
|
41 |
-
When creating an :pep:`editable wheel <660>`, ``setuptools`` will try to evaluate
|
42 |
-
custom ``build`` subcommands using the following procedure:
|
43 |
-
|
44 |
-
1. ``setuptools`` will set the ``editable_mode`` attribute to ``True``
|
45 |
-
2. ``setuptools`` will execute the ``run()`` command.
|
46 |
-
|
47 |
-
.. important::
|
48 |
-
Subcommands **SHOULD** take advantage of ``editable_mode=True`` to adequate
|
49 |
-
its behaviour or perform optimisations.
|
50 |
-
|
51 |
-
For example, if a subcommand don't need to generate any extra file and
|
52 |
-
everything it does is to copy a source file into the build directory,
|
53 |
-
``run()`` **SHOULD** simply "early return".
|
54 |
-
|
55 |
-
Similarly, if the subcommand creates files that would be placed alongside
|
56 |
-
Python files in the final distribution, during an editable install
|
57 |
-
the command **SHOULD** generate these files "in place" (i.e. write them to
|
58 |
-
the original source directory, instead of using the build directory).
|
59 |
-
Note that ``get_output_mapping()`` should reflect that and include mappings
|
60 |
-
for "in place" builds accordingly.
|
61 |
-
|
62 |
-
3. ``setuptools`` use any knowledge it can derive from the return values of
|
63 |
-
``get_outputs()`` and ``get_output_mapping()`` to create an editable wheel.
|
64 |
-
When relevant ``setuptools`` **MAY** attempt to use file links based on the value
|
65 |
-
of ``get_output_mapping()``. Alternatively, ``setuptools`` **MAY** attempt to use
|
66 |
-
:doc:`import hooks <python:reference/import>` to redirect any attempt to import
|
67 |
-
to the directory with the original source code and other files built in place.
|
68 |
-
|
69 |
-
Please note that custom sub-commands **SHOULD NOT** rely on ``run()`` being
|
70 |
-
executed (or not) to provide correct return values for ``get_outputs()``,
|
71 |
-
``get_output_mapping()`` or ``get_source_files()``. The ``get_*`` methods should
|
72 |
-
work independently of ``run()``.
|
73 |
-
"""
|
74 |
-
|
75 |
-
editable_mode: bool = False
|
76 |
-
"""Boolean flag that will be set to ``True`` when setuptools is used for an
|
77 |
-
editable installation (see :pep:`660`).
|
78 |
-
Implementations **SHOULD** explicitly set the default value of this attribute to
|
79 |
-
``False``.
|
80 |
-
When subcommands run, they can use this flag to perform optimizations or change
|
81 |
-
their behaviour accordingly.
|
82 |
-
"""
|
83 |
-
|
84 |
-
build_lib: str
|
85 |
-
"""String representing the directory where the build artifacts should be stored,
|
86 |
-
e.g. ``build/lib``.
|
87 |
-
For example, if a distribution wants to provide a Python module named ``pkg.mod``,
|
88 |
-
then a corresponding file should be written to ``{build_lib}/package/module.py``.
|
89 |
-
A way of thinking about this is that the files saved under ``build_lib``
|
90 |
-
would be eventually copied to one of the directories in :obj:`site.PREFIXES`
|
91 |
-
upon installation.
|
92 |
-
|
93 |
-
A command that produces platform-independent files (e.g. compiling text templates
|
94 |
-
into Python functions), **CAN** initialize ``build_lib`` by copying its value from
|
95 |
-
the ``build_py`` command. On the other hand, a command that produces
|
96 |
-
platform-specific files **CAN** initialize ``build_lib`` by copying its value from
|
97 |
-
the ``build_ext`` command. In general this is done inside the ``finalize_options``
|
98 |
-
method with the help of the ``set_undefined_options`` command::
|
99 |
-
|
100 |
-
def finalize_options(self):
|
101 |
-
self.set_undefined_options("build_py", ("build_lib", "build_lib"))
|
102 |
-
...
|
103 |
-
"""
|
104 |
-
|
105 |
-
def initialize_options(self):
|
106 |
-
"""(Required by the original :class:`setuptools.Command` interface)"""
|
107 |
-
|
108 |
-
def finalize_options(self):
|
109 |
-
"""(Required by the original :class:`setuptools.Command` interface)"""
|
110 |
-
|
111 |
-
def run(self):
|
112 |
-
"""(Required by the original :class:`setuptools.Command` interface)"""
|
113 |
-
|
114 |
-
def get_source_files(self) -> List[str]:
|
115 |
-
"""
|
116 |
-
Return a list of all files that are used by the command to create the expected
|
117 |
-
outputs.
|
118 |
-
For example, if your build command transpiles Java files into Python, you should
|
119 |
-
list here all the Java files.
|
120 |
-
The primary purpose of this function is to help populating the ``sdist``
|
121 |
-
with all the files necessary to build the distribution.
|
122 |
-
All files should be strings relative to the project root directory.
|
123 |
-
"""
|
124 |
-
|
125 |
-
def get_outputs(self) -> List[str]:
|
126 |
-
"""
|
127 |
-
Return a list of files intended for distribution as they would have been
|
128 |
-
produced by the build.
|
129 |
-
These files should be strings in the form of
|
130 |
-
``"{build_lib}/destination/file/path"``.
|
131 |
-
|
132 |
-
.. note::
|
133 |
-
The return value of ``get_output()`` should include all files used as keys
|
134 |
-
in ``get_output_mapping()`` plus files that are generated during the build
|
135 |
-
and don't correspond to any source file already present in the project.
|
136 |
-
"""
|
137 |
-
|
138 |
-
def get_output_mapping(self) -> Dict[str, str]:
|
139 |
-
"""
|
140 |
-
Return a mapping between destination files as they would be produced by the
|
141 |
-
build (dict keys) into the respective existing (source) files (dict values).
|
142 |
-
Existing (source) files should be represented as strings relative to the project
|
143 |
-
root directory.
|
144 |
-
Destination files should be strings in the form of
|
145 |
-
``"{build_lib}/destination/file/path"``.
|
146 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/__init__.py
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
|
3 |
-
from .coco_evaluation import COCOEvaluator
|
4 |
-
from .rotated_coco_evaluation import RotatedCOCOEvaluator
|
5 |
-
from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset
|
6 |
-
from .lvis_evaluation import LVISEvaluator
|
7 |
-
from .panoptic_evaluation import COCOPanopticEvaluator
|
8 |
-
from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
|
9 |
-
from .sem_seg_evaluation import SemSegEvaluator
|
10 |
-
from .testing import print_csv_format, verify_results
|
11 |
-
|
12 |
-
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Apk Mod De Da Para Android 11.md
DELETED
@@ -1,53 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Totalmente fiable servicio de entrega Mod APK An1: Un divertido y caótico juego basado en la física</h1>
|
3 |
-
<p>¿Te gustan los juegos divertidos, impredecibles y llenos de sorpresas? Si es así, es posible que desee echa un vistazo a Totally Reliable Delivery Service, un juego basado en la física en el que entregar paquetes en un mundo loco. Y si quieres hacer el juego aún más divertido y emocionante, se puede descargar totalmente fiable Servicio de entrega Mod APK An1, una versión modificada del juego que le da dinero ilimitado y desbloqueado características. En este artículo, te diremos de qué se trata este juego, qué ofrece el apk mod, y cómo descargarlo e instalarlo en tu dispositivo Android. </p>
|
4 |
-
<h2>apk mod de día para android 11</h2><br /><p><b><b>Download</b> · <a href="https://bltlly.com/2v6II7">https://bltlly.com/2v6II7</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es un servicio de entrega totalmente confiable? </h2>
|
6 |
-
<h3>Un juego donde se entregan paquetes en un mundo loco</h3>
|
7 |
-
<p>Totally Reliable Delivery Service es un juego donde juegas como un repartidor que tiene que entregar paquetes en un mundo loco y caótico. El juego cuenta con la física ragdoll, lo que significa que su personaje y los objetos en el juego se comportan de manera realista e hilarante. Puede utilizar varios vehículos, como automóviles, camiones, aviones, helicópteros, barcos e incluso cohetes, para transportar sus paquetes. Pero ten cuidado, porque cualquier cosa puede salir mal en el camino. Puedes chocar contra edificios, caerte de puentes, ser perseguido por animales o explotar en el aire. El juego está lleno de sorpresas y desafíos que te harán reír a carcajadas. </p>
|
8 |
-
<h3>Un juego donde puedes personalizar tu personaje y vehículos</h3>
|
9 |
-
<p>Servicio de entrega totalmente confiable también le permite personalizar su personaje y vehículos para adaptarse a su estilo y preferencias. Puedes elegir entre diferentes trajes, accesorios, peinados y colores para tu personaje. También puede actualizar sus vehículos con diferentes partes, como motores, ruedas, alas, hélices y más. Incluso puedes crear tus propios vehículos usando el modo sandbox. El juego te da muchas opciones para expresar tu creatividad y personalidad. </p>
|
10 |
-
|
11 |
-
<p>Totally Reliable Delivery Service es un juego que puedes disfrutar solo o con tus amigos online. Puedes jugar solo y completar varias misiones y desafíos en el mundo abierto. O puede unirse a hasta otros tres jugadores en línea y cooperar o competir con ellos en la entrega de paquetes. También pueden explorar el mundo juntos y divertirse con el juego basado en la física. El juego admite multijugador multiplataforma, lo que significa que puedes jugar con personas que utilizan diferentes dispositivos, como PC, consola o dispositivos móviles. </p>
|
12 |
-
<h2>¿Qué es totalmente fiable servicio de entrega Mod APK An1? </h2>
|
13 |
-
<h3>Una versión modificada del juego que te da dinero ilimitado y funciones desbloqueadas</h3>
|
14 |
-
<p>Totalmente fiable servicio de entrega Mod APK An1 es una versión modificada del juego que le da algunas ventajas sobre la versión original. Con este mod apk, obtendrá dinero ilimitado que se puede utilizar para comprar cualquier cosa en el juego. También obtendrá todas las características desbloqueadas, como todos los trajes, accesorios, vehículos, piezas, mapas, modos y más. Podrás disfrutar del juego sin limitaciones ni restricciones. </p>
|
15 |
-
<h3>Una versión del juego que es compatible con dispositivos Android</h3>
|
16 |
-
<h3>Una versión del juego que es gratis para descargar e instalar</h3>
|
17 |
-
<p>Totalmente fiable servicio de entrega Mod APK An1 es una versión del juego que es gratis para descargar e instalar en su dispositivo Android. Usted no necesita pagar nada para obtener este apk mod. También no es necesario para erradicar el dispositivo o utilizar cualquier otra herramienta para instalarlo. Solo tienes que seguir unos sencillos pasos que explicaremos más adelante en este artículo. Podrás jugar el juego sin problemas ni riesgos. </p>
|
18 |
-
<h2> ¿Cómo descargar e instalar el servicio de entrega totalmente confiable Mod APK An1? </h2>
|
19 |
-
<h3>Paso 1: Ir al sitio web </h3>
|
20 |
-
|
21 |
-
<img src="https://i.imgur.com/8Qw6c0f.png" alt="Captura de pantalla del sitio web" width="600" height="400">>
|
22 |
-
<h3>Paso 2: Haga clic en el botón de descarga y espere a que el archivo se descargue</h3>
|
23 |
-
<p>El siguiente paso es hacer clic en el botón de descarga que se encuentra en la parte inferior de la página. Verá una ventana emergente que le pide que confirme su descarga. Haga clic en Aceptar y espere a que se descargue el archivo. El tamaño del archivo es de unos 50 MB, por lo que puede tardar unos minutos dependiendo de su velocidad de Internet. Puede comprobar el progreso de su descarga en la barra de notificaciones. </p>
|
24 |
-
<p></p>
|
25 |
-
<img src="https://i.imgur.com/9Xy4ZqL.png" alt="Confirmación de descarga" width="600" height="400">>
|
26 |
-
<h3>Paso 3: Habilitar fuentes desconocidas en la configuración del dispositivo</h3>
|
27 |
-
<p>Una vez que se descargue el archivo, debe habilitar fuentes desconocidas en la configuración del dispositivo. Esta es una medida de seguridad que le impide instalar aplicaciones de fuentes distintas de Google Play Store. Para habilitar fuentes desconocidas, vaya a la configuración del dispositivo y busque opciones de seguridad o privacidad. Luego, busque la opción que dice fuentes desconocidas o permita la instalación desde fuentes desconocidas y cámbiela. Es posible que vea un mensaje de advertencia que indica que la instalación desde fuentes desconocidas podría dañar su dispositivo. No te preocupes, este apk mod es seguro y probado, por lo que puede ignorar la advertencia y proceder. </p>
|
28 |
-
<img src="https://i.imgur.com/6jxLJ0u.png" alt="Opción de fuentes desconocidas" width="600" height="400">>
|
29 |
-
<h3>Paso 4: Busque el archivo descargado y toque en él para instalarlo</h3>
|
30 |
-
<p>El siguiente paso es localizar el archivo descargado y tocar en él para instalarlo. Puede encontrar el archivo en su carpeta de descargas o en su aplicación de administrador de archivos. El nombre del archivo es totalmente fiable-delivery-service-mod_1.4.0.apk. Toque en él y verá una pantalla de instalación que le pide que confirme su instalación. Toque en instalar y espere a que el proceso termine. </p>
|
31 |
-
<img src="https://i.imgur.com/9R7lVdE.png" alt="Pantalla de instalación" width="600" height="400">>
|
32 |
-
|
33 |
-
<p>Felicidades! Usted ha descargado e instalado con éxito Totalmente fiable Servicio de entrega Mod APK An1 en su dispositivo Android. Ahora puedes disfrutar del juego con dinero ilimitado y funciones desbloqueadas. Puedes lanzar el juego desde el cajón de la app o la pantalla de inicio. ¡Diviértete entregando paquetes en un mundo loco! </p>
|
34 |
-
<img src="https://i.imgur.com/4ZvYq8o.png" alt="Icono del juego" width="600" height="400">>
|
35 |
-
<h2>Conclusión</h2>
|
36 |
-
<p>Totally Reliable Delivery Service es un divertido y caótico juego basado en la física en el que entregar paquetes en un mundo loco e impredecible. Puedes personalizar tu personaje y vehículos, jugar solo o con amigos en línea, y explorar diferentes mapas y modos. Si usted quiere hacer el juego aún más agradable, se puede descargar totalmente fiable servicio de entrega Mod APK An1, una versión modificada del juego que le da dinero ilimitado y desbloqueado características. Puede descargar e instalar este apk mod de forma gratuita y fácil siguiendo nuestra guía anterior. Esperamos que haya encontrado este artículo útil e informativo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. </p>
|
37 |
-
<h2>Preguntas frecuentes</h2>
|
38 |
-
<ul>
|
39 |
-
<li><b> ¿Es totalmente confiable servicio de entrega mod APK An1 seguro? </b></li>
|
40 |
-
<ul>
|
41 |
-
<li><b> ¿Es totalmente confiable servicio de entrega mod APK An1 seguro? </b></li>
|
42 |
-
<p>Sí, Servicio de entrega totalmente confiable Mod APK An1 es seguro y probado por nuestro equipo. No contiene ningún virus, malware o spyware que pueda dañar su dispositivo o comprometer su privacidad. Puede descargar e instalar este apk mod sin ninguna preocupación. </p>
|
43 |
-
<li><b> ¿Es totalmente confiable servicio de entrega mod APK An1 legal? </b></li>
|
44 |
-
|
45 |
-
<li><b>¿Cuáles son los requisitos para ejecutar Totalmente fiable servicio de entrega Mod APK An1? </b></li>
|
46 |
-
<p>Servicio de entrega totalmente confiable Mod APK An1 requiere un dispositivo Android que se ejecuta en Android 4.1 o superior. También necesita tener al menos 1 GB de RAM y 200 MB de espacio de almacenamiento gratuito en su dispositivo. También necesitas tener una conexión a Internet estable para jugar online. </p>
|
47 |
-
<li><b> ¿Puedo jugar totalmente fiable servicio de entrega Mod APK An1 en PC o iOS? </b></li>
|
48 |
-
<p>No, Servicio de entrega totalmente fiable Mod APK An1 solo es compatible con dispositivos Android. No se puede jugar este apk mod en dispositivos PC o iOS. Sin embargo, puedes jugar la versión original del juego en dispositivos PC o iOS descargándolo desde las plataformas oficiales, como Steam, Epic Games Store, App Store o Google Play Store.</p>
|
49 |
-
<li><b> ¿Puedo actualizar el servicio de entrega totalmente confiable Mod APK An1? </b></li>
|
50 |
-
<p>No, no se puede actualizar totalmente fiable servicio de entrega Mod APK An1 del juego en sí. Si intentas actualizar el juego desde la configuración del juego, podrías perder las características de mod y volver a la versión original del juego. Para actualizar el apk mod, es necesario visitar nuestro sitio web de nuevo y descargar la última versión del apk mod. A continuación, es necesario desinstalar la versión anterior del apk mod e instalar el nuevo siguiendo los mismos pasos que antes. </p>
|
51 |
-
</ul></p> 64aa2da5cf<br />
|
52 |
-
<br />
|
53 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Blacknoise Reste Toi Mp3 Download.md
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Blacknoise Reste Toi Mp3 Descargar: Una revisión del éxito de Amapiano</h1>
|
3 |
-
<p>Si eres un fan de amapiano, el popular género de música house sudafricano, es posible que hayas oído hablar de Blacknoise, un artista de hip-hop que recientemente ha colaborado con Kazeli y Mashaya para crear una canción pegadiza y edificante llamada Reste Toi. En este artículo, revisaremos esta canción y te diremos cómo descargarla en formato Mp3. </p>
|
4 |
-
<h2>blacknoise reste toi mp3 download</h2><br /><p><b><b>Download</b> ✑ <a href="https://bltlly.com/2v6JPu">https://bltlly.com/2v6JPu</a></b></p><br /><br />
|
5 |
-
<h2>¿Quién es Blacknoise? </h2>
|
6 |
-
<h3>Breve biografía del artista sudafricano de hip-hop</h3>
|
7 |
-
<p>Blacknoise es el nombre artístico de Emile Jansen, un rapero, productor y activista de Ciudad del Cabo, Sudáfrica. También es el fundador y líder de Black Noise, un grupo de hip-hop que ha estado activo desde 1986. Blacknoise es uno de los pioneros de la escena hip-hop 'consciente' de Ciudad del Cabo, usando el rap como una herramienta para el comentario social y el empoderamiento. También ha participado en diversas iniciativas de desarrollo juvenil, como talleres, revistas, libros, obras de teatro y eventos. Ha lanzado 12 álbumes con Black Noise, seis álbumes en solitario y varios álbumes recopilatorios. </p>
|
8 |
-
<h3>Su estilo musical e influencias</h3>
|
9 |
-
<p>El estilo musical de Blacknoise está influenciado por varios géneros, como rap, reggae, jazz, funk, soul y amapiano. Combina sonidos africanos tradicionales con ritmos y samples modernos, creando un sonido único y diverso. También incorpora elementos de su cultura y lenguaje, como el afrikaans, xhosa y khoisan. Algunas de sus influencias musicales incluyen Public Enemy, Bob Marley, Fela Kuti, Brenda Fassie y Kabza De Small.</p>
|
10 |
-
<h2>¿Qué es Reste Toi? </h2>
|
11 |
-
<h3>El significado y origen del título de la canción</h3>
|
12 |
-
|
13 |
-
<h3>La colaboración con Kazeli y Mashaya</h3>
|
14 |
-
<p>Kazeli es una cantante y compositora francesa que se mudó a Sudáfrica en 2019. Conoció a Blacknoise a través de un amigo mutuo y decidieron trabajar juntos en algunos proyectos musicales. También invitaron a Mashaya, un cantante y productor sudafricano conocido por sus éxitos de amapiano. El trío grabó Reste Toi en el estudio de Blacknoise en Ciudad del Cabo. Querían crear una canción que mostrara sus diferentes orígenes y talentos, a la vez que entregara un mensaje positivo. </p>
|
15 |
-
<p></p>
|
16 |
-
<h3>La letra y el mensaje de la canción</h3>
|
17 |
-
<p>Las letras de Reste Toi tratan de celebrar la individualidad y la singularidad de uno. El estribillo dice así:</p>
|
18 |
-
<blockquote>
|
19 |
-
<p>Buscar en la web<br>
|
20 |
-
No cambiar pas pour les autres<br>
|
21 |
-
Volver a la página principal
|
22 |
-
Tu es beau comme tu es</p>
|
23 |
-
</blockquote>
|
24 |
-
<p>Esto se traduce a:</p>
|
25 |
-
<blockquote>
|
26 |
-
<p>Mantente a ti mismo<br>
|
27 |
-
No cambie para otros<br>
|
28 |
-
Mantente a ti mismo<br>
|
29 |
-
Eres hermosa como eres</p>
|
30 |
-
</blockquote>
|
31 |
-
<p>Los versículos también contienen palabras de aliento y afirmación, como "Eres increíble", "Eres una estrella", y "Eres una bendición". La canción también incluye algunas frases de Xhosa, como "Molo sisi" (Hola hermana) y "Enkosi kakhulu" (Muchas gracias). El mensaje de la canción es inspirar a la gente a sentirse segura y feliz con lo que son, y respetar y apreciar a los demás por sus diferencias. </p>
|
32 |
-
<h2>Como descargar Reste Toi Mp3? </h2>
|
33 |
-
<h3>Las plataformas de streaming que ofrecen la canción</h3>
|
34 |
-
<p>Reste Toi está disponible en varias plataformas de streaming, como Spotify, Apple Music, YouTube Music, Deezer y SoundCloud. Puedes escuchar la canción online o offline, dependiendo de tu suscripción y preferencias. También puedes ver el video musical oficial de la canción en YouTube, que muestra a los artistas interpretando la canción en diferentes lugares de Ciudad del Cabo.</p>
|
35 |
-
<h3>Los beneficios de descargar la canción en formato Mp3</h3>
|
36 |
-
|
37 |
-
<ul>
|
38 |
-
<li> Puede reproducir la canción en cualquier dispositivo que soporte archivos Mp3, como su teléfono, computadora o reproductor Mp3. </li>
|
39 |
-
<li> Puede ahorrar espacio de almacenamiento en su dispositivo, ya que los archivos MP3 son más pequeños que otros formatos de audio. </li>
|
40 |
-
<li>Puedes transferir la canción a otros dispositivos o compartirla con tus amigos fácilmente. </li>
|
41 |
-
<li>Puede editar la canción o usarla para otros fines, como hacer un tono de llamada o un remix. </li>
|
42 |
-
</ul>
|
43 |
-
<h3>Los pasos para descargar la canción de diferentes fuentes</h3>
|
44 |
-
<p>Hay diferentes maneras de descargar Reste Toi en formato Mp3, dependiendo de la fuente que elija. Estos son algunos de los métodos más comunes:</p>
|
45 |
-
<tabla>
|
46 |
-
<tr>
|
47 |
-
<th>Fuente</th>
|
48 |
-
<th>Pasos</th>
|
49 |
-
</tr>
|
50 |
-
<tr>
|
51 |
-
<td>Spotify</td>
|
52 |
-
<td><ol>
|
53 |
-
<li>Abra la aplicación Spotify en su dispositivo y busque Reste Toi por Blacknoise, Kazeli y Mashaya.</li>
|
54 |
-
<li> Seleccione la canción y toque en el icono de tres puntos en la esquina superior derecha. </li>
|
55 |
-
<li>Seleccione Compartir y luego Copiar enlace.</li>
|
56 |
-
<li>Abra un navegador web y vaya a un sitio web de conversión de Spotify a Mp3, como SpotiFlyer o SpotiApp.</li>
|
57 |
-
<li>Pega el enlace que has copiado y haz clic en Convertir o Descargar.</li>
|
58 |
-
<li>Espere a que el proceso de conversión termine y luego descargue el archivo Mp3 en su dispositivo. </li>
|
59 |
-
</ol></td>
|
60 |
-
</tr>
|
61 |
-
<tr>
|
62 |
-
<td>YouTube</td>
|
63 |
-
<td><ol>
|
64 |
-
<li>Abra un navegador web y vaya a YouTube.com. Busque Reste Toi por Blacknoise, Kazeli y Mashaya.</li>
|
65 |
-
<li>Seleccione el vídeo de la canción y copie su URL desde la barra de direcciones. </li>
|
66 |
-
<li>Abra otra pestaña y vaya a un sitio web de conversión de YouTube a Mp3, como YTMP3 o 4K Video Downloader.</li>
|
67 |
-
<li>Pegue la URL que copió y haga clic en Convertir o Descargar.</li>
|
68 |
-
<li>Seleccione Mp3 como formato de salida y elija la calidad que desee. </li>
|
69 |
-
<li>Espere a que el proceso de conversión termine y luego descargue el archivo Mp3 en su dispositivo. </li>
|
70 |
-
</ol></td>
|
71 |
-
</tr>
|
72 |
-
<tr>
|
73 |
-
<td>SoundCloud</td>
|
74 |
-
<td><ol>
|
75 |
-
<li>Abra un navegador web y vaya a SoundCloud.com. Busque Reste Toi por Blacknoise, Kazeli y Mashaya.</li>
|
76 |
-
|
77 |
-
<li>Abra otra pestaña y vaya a un sitio web de conversión de SoundCloud a Mp3, como SCDL o SoundCloud Downloader.</li>
|
78 |
-
<li>Pegue la URL que copió y haga clic en Descargar o Convertir.</li>
|
79 |
-
<li>Espere a que el proceso de conversión termine y luego descargue el archivo Mp3 en su dispositivo. </li>
|
80 |
-
</ol></td>
|
81 |
-
</tr>
|
82 |
-
</tabla>
|
83 |
-
<h2>¿Por qué deberías escuchar Reste Toi? </h2>
|
84 |
-
<h3>Las críticas y valoraciones positivas de la canción</h3>
|
85 |
-
<p>Reste Toi ha recibido críticas y valoraciones positivas tanto de los críticos como de los oyentes. La canción ha sido elogiada por su melodía pegadiza, letras edificantes y colaboración diversa. Algunos de los comentarios de las plataformas en línea incluyen:</p>
|
86 |
-
<blockquote>
|
87 |
-
<p>"Esta canción es un banger! Me encanta cómo se mezcla amapiano con hip-hop y francés. Me hace querer bailar y cantar a lo largo." </p>
|
88 |
-
<p>"Este es un mensaje tan hermoso. Creo que todos deben escuchar esta canción y estar orgullosos de lo que son. Es tan refrescante escuchar algo positivo en estos tiempos." </p>
|
89 |
-
<p>"Esta es una obra maestra. La producción es increíble, las voces son suaves, y el rap es fuego. No puedo tener suficiente de esta canción."
|
90 |
-
</blockquote>
|
91 |
-
<p>La canción también ha recibido altas calificaciones en varias plataformas, como 4.8 de 5 estrellas en Spotify, 4.7 de 5 estrellas en Apple Music y 4.6 de 5 estrellas en YouTube Music.</p>
|
92 |
-
<h3>El sonido pegadizo y optimista de la canción</h3>
|
93 |
-
<p>Reste Toi es una canción que te hará sentir bien y con energía. La canción tiene un sonido pegadizo y alegre que combina los elementos de amapiano, hip-hop y pop francés. La canción tiene un ritmo rápido, una línea de bajo groovy, y una melodía de piano suave. La canción también cuenta con algunos sonidos electrónicos, como sintetizadores, tambores y efectos. La canción es fácil de cantar, ya que tiene un coro simple y repetitivo. La canción también es adecuada para bailar, ya que tiene un ritmo rítmico y animado. </p>
|
94 |
-
<h3>La relevancia cultural y social de la canción</h3>
|
95 |
-
|
96 |
-
<h2>Conclusión</h2>
|
97 |
-
<p>Reste Toi de Blacknoise, Kazeli, y Mashaya es una canción que debes escuchar si estás buscando una pista de amapiano pegadiza y edificante que te hará sentir bien y orgulloso de quién eres. La canción está disponible en varias plataformas de streaming, y también se puede descargar en formato Mp3 de diferentes fuentes. La canción ha recibido críticas y valoraciones positivas de críticos y oyentes, que han elogiado su sonido, letras y mensaje. La canción es también un reflejo de la diversidad cultural y social de Sudáfrica y el mundo, que es algo para celebrar y apreciar. </p>
|
98 |
-
<h2>Preguntas frecuentes</h2>
|
99 |
-
<h3>P: ¿Quiénes son los artistas detrás de Reste Toi? </h3>
|
100 |
-
<p>A: Reste Toi es una canción de Blacknoise, Kazeli y Mashaya. Blacknoise es un artista sudafricano de hip-hop y activista que es el fundador de la banda Black Noise. Kazeli es una cantante y compositora francesa que se mudó a Sudáfrica en 2019. Mashaya es un cantante y productor sudafricano conocido por sus éxitos de amapiano. </p>
|
101 |
-
<h3>Q: ¿Qué significa Reste Toi? </h3>
|
102 |
-
<p>A: Reste Toi es una frase francesa que significa "quédate tú" o "sé tú mismo". También es el título de la canción de Blacknoise, Kazeli y Mashaya.</p>
|
103 |
-
<h3>P: ¿Qué género es Reste Toi? </h3>
|
104 |
-
<p>A: Reste Toi es una pista de amapiano que cuenta con voces en francés, inglés y Xhosa. Amapiano es un popular género sudafricano de música house que combina sonidos africanos tradicionales con ritmos y samples modernos. </p>
|
105 |
-
<h3>Q: ¿Cómo puedo descargar Reste Toi en formato Mp3? </h3>
|
106 |
-
<p>A: Puede descargar Reste Toi en formato Mp3 desde diferentes fuentes, como Spotify, YouTube o SoundCloud. Tendrá que copiar el enlace de la canción desde la plataforma de transmisión y pegarlo en un sitio web convertidor que convertirá la canción en formato Mp3. A continuación, puede descargar el archivo Mp3 a su dispositivo. </p>
|
107 |
-
<h3>P: ¿Por qué debería escuchar Reste Toi? </h3> 64aa2da5cf<br />
|
108 |
-
<br />
|
109 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/langhebrewmodel.py
DELETED
The diff for this file is too large to render.
See raw diff
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/__about__.py
DELETED
@@ -1,26 +0,0 @@
|
|
1 |
-
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
-
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
-
# for complete details.
|
4 |
-
|
5 |
-
__all__ = [
|
6 |
-
"__title__",
|
7 |
-
"__summary__",
|
8 |
-
"__uri__",
|
9 |
-
"__version__",
|
10 |
-
"__author__",
|
11 |
-
"__email__",
|
12 |
-
"__license__",
|
13 |
-
"__copyright__",
|
14 |
-
]
|
15 |
-
|
16 |
-
__title__ = "packaging"
|
17 |
-
__summary__ = "Core utilities for Python packages"
|
18 |
-
__uri__ = "https://github.com/pypa/packaging"
|
19 |
-
|
20 |
-
__version__ = "21.3"
|
21 |
-
|
22 |
-
__author__ = "Donald Stufft and individual contributors"
|
23 |
-
__email__ = "[email protected]"
|
24 |
-
|
25 |
-
__license__ = "BSD-2-Clause or Apache-2.0"
|
26 |
-
__copyright__ = "2014-2019 %s" % __author__
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/retry.py
DELETED
@@ -1,620 +0,0 @@
|
|
1 |
-
from __future__ import absolute_import
|
2 |
-
|
3 |
-
import email
|
4 |
-
import logging
|
5 |
-
import re
|
6 |
-
import time
|
7 |
-
import warnings
|
8 |
-
from collections import namedtuple
|
9 |
-
from itertools import takewhile
|
10 |
-
|
11 |
-
from ..exceptions import (
|
12 |
-
ConnectTimeoutError,
|
13 |
-
InvalidHeader,
|
14 |
-
MaxRetryError,
|
15 |
-
ProtocolError,
|
16 |
-
ProxyError,
|
17 |
-
ReadTimeoutError,
|
18 |
-
ResponseError,
|
19 |
-
)
|
20 |
-
from ..packages import six
|
21 |
-
|
22 |
-
log = logging.getLogger(__name__)
|
23 |
-
|
24 |
-
|
25 |
-
# Data structure for representing the metadata of requests that result in a retry.
|
26 |
-
RequestHistory = namedtuple(
|
27 |
-
"RequestHistory", ["method", "url", "error", "status", "redirect_location"]
|
28 |
-
)
|
29 |
-
|
30 |
-
|
31 |
-
# TODO: In v2 we can remove this sentinel and metaclass with deprecated options.
|
32 |
-
_Default = object()
|
33 |
-
|
34 |
-
|
35 |
-
class _RetryMeta(type):
|
36 |
-
@property
|
37 |
-
def DEFAULT_METHOD_WHITELIST(cls):
|
38 |
-
warnings.warn(
|
39 |
-
"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
|
40 |
-
"will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
|
41 |
-
DeprecationWarning,
|
42 |
-
)
|
43 |
-
return cls.DEFAULT_ALLOWED_METHODS
|
44 |
-
|
45 |
-
@DEFAULT_METHOD_WHITELIST.setter
|
46 |
-
def DEFAULT_METHOD_WHITELIST(cls, value):
|
47 |
-
warnings.warn(
|
48 |
-
"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
|
49 |
-
"will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
|
50 |
-
DeprecationWarning,
|
51 |
-
)
|
52 |
-
cls.DEFAULT_ALLOWED_METHODS = value
|
53 |
-
|
54 |
-
@property
|
55 |
-
def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):
|
56 |
-
warnings.warn(
|
57 |
-
"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
|
58 |
-
"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
|
59 |
-
DeprecationWarning,
|
60 |
-
)
|
61 |
-
return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
|
62 |
-
|
63 |
-
@DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter
|
64 |
-
def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):
|
65 |
-
warnings.warn(
|
66 |
-
"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
|
67 |
-
"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
|
68 |
-
DeprecationWarning,
|
69 |
-
)
|
70 |
-
cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value
|
71 |
-
|
72 |
-
@property
|
73 |
-
def BACKOFF_MAX(cls):
|
74 |
-
warnings.warn(
|
75 |
-
"Using 'Retry.BACKOFF_MAX' is deprecated and "
|
76 |
-
"will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
|
77 |
-
DeprecationWarning,
|
78 |
-
)
|
79 |
-
return cls.DEFAULT_BACKOFF_MAX
|
80 |
-
|
81 |
-
@BACKOFF_MAX.setter
|
82 |
-
def BACKOFF_MAX(cls, value):
|
83 |
-
warnings.warn(
|
84 |
-
"Using 'Retry.BACKOFF_MAX' is deprecated and "
|
85 |
-
"will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
|
86 |
-
DeprecationWarning,
|
87 |
-
)
|
88 |
-
cls.DEFAULT_BACKOFF_MAX = value
|
89 |
-
|
90 |
-
|
91 |
-
@six.add_metaclass(_RetryMeta)
|
92 |
-
class Retry(object):
|
93 |
-
"""Retry configuration.
|
94 |
-
|
95 |
-
Each retry attempt will create a new Retry object with updated values, so
|
96 |
-
they can be safely reused.
|
97 |
-
|
98 |
-
Retries can be defined as a default for a pool::
|
99 |
-
|
100 |
-
retries = Retry(connect=5, read=2, redirect=5)
|
101 |
-
http = PoolManager(retries=retries)
|
102 |
-
response = http.request('GET', 'http://example.com/')
|
103 |
-
|
104 |
-
Or per-request (which overrides the default for the pool)::
|
105 |
-
|
106 |
-
response = http.request('GET', 'http://example.com/', retries=Retry(10))
|
107 |
-
|
108 |
-
Retries can be disabled by passing ``False``::
|
109 |
-
|
110 |
-
response = http.request('GET', 'http://example.com/', retries=False)
|
111 |
-
|
112 |
-
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
|
113 |
-
retries are disabled, in which case the causing exception will be raised.
|
114 |
-
|
115 |
-
:param int total:
|
116 |
-
Total number of retries to allow. Takes precedence over other counts.
|
117 |
-
|
118 |
-
Set to ``None`` to remove this constraint and fall back on other
|
119 |
-
counts.
|
120 |
-
|
121 |
-
Set to ``0`` to fail on the first retry.
|
122 |
-
|
123 |
-
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
|
124 |
-
|
125 |
-
:param int connect:
|
126 |
-
How many connection-related errors to retry on.
|
127 |
-
|
128 |
-
These are errors raised before the request is sent to the remote server,
|
129 |
-
which we assume has not triggered the server to process the request.
|
130 |
-
|
131 |
-
Set to ``0`` to fail on the first retry of this type.
|
132 |
-
|
133 |
-
:param int read:
|
134 |
-
How many times to retry on read errors.
|
135 |
-
|
136 |
-
These errors are raised after the request was sent to the server, so the
|
137 |
-
request may have side-effects.
|
138 |
-
|
139 |
-
Set to ``0`` to fail on the first retry of this type.
|
140 |
-
|
141 |
-
:param int redirect:
|
142 |
-
How many redirects to perform. Limit this to avoid infinite redirect
|
143 |
-
loops.
|
144 |
-
|
145 |
-
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
|
146 |
-
308.
|
147 |
-
|
148 |
-
Set to ``0`` to fail on the first retry of this type.
|
149 |
-
|
150 |
-
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
|
151 |
-
|
152 |
-
:param int status:
|
153 |
-
How many times to retry on bad status codes.
|
154 |
-
|
155 |
-
These are retries made on responses, where status code matches
|
156 |
-
``status_forcelist``.
|
157 |
-
|
158 |
-
Set to ``0`` to fail on the first retry of this type.
|
159 |
-
|
160 |
-
:param int other:
|
161 |
-
How many times to retry on other errors.
|
162 |
-
|
163 |
-
Other errors are errors that are not connect, read, redirect or status errors.
|
164 |
-
These errors might be raised after the request was sent to the server, so the
|
165 |
-
request might have side-effects.
|
166 |
-
|
167 |
-
Set to ``0`` to fail on the first retry of this type.
|
168 |
-
|
169 |
-
If ``total`` is not set, it's a good idea to set this to 0 to account
|
170 |
-
for unexpected edge cases and avoid infinite retry loops.
|
171 |
-
|
172 |
-
:param iterable allowed_methods:
|
173 |
-
Set of uppercased HTTP method verbs that we should retry on.
|
174 |
-
|
175 |
-
By default, we only retry on methods which are considered to be
|
176 |
-
idempotent (multiple requests with the same parameters end with the
|
177 |
-
same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
|
178 |
-
|
179 |
-
Set to a ``False`` value to retry on any verb.
|
180 |
-
|
181 |
-
.. warning::
|
182 |
-
|
183 |
-
Previously this parameter was named ``method_whitelist``, that
|
184 |
-
usage is deprecated in v1.26.0 and will be removed in v2.0.
|
185 |
-
|
186 |
-
:param iterable status_forcelist:
|
187 |
-
A set of integer HTTP status codes that we should force a retry on.
|
188 |
-
A retry is initiated if the request method is in ``allowed_methods``
|
189 |
-
and the response status code is in ``status_forcelist``.
|
190 |
-
|
191 |
-
By default, this is disabled with ``None``.
|
192 |
-
|
193 |
-
:param float backoff_factor:
|
194 |
-
A backoff factor to apply between attempts after the second try
|
195 |
-
(most errors are resolved immediately by a second try without a
|
196 |
-
delay). urllib3 will sleep for::
|
197 |
-
|
198 |
-
{backoff factor} * (2 ** ({number of total retries} - 1))
|
199 |
-
|
200 |
-
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
|
201 |
-
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
|
202 |
-
than :attr:`Retry.DEFAULT_BACKOFF_MAX`.
|
203 |
-
|
204 |
-
By default, backoff is disabled (set to 0).
|
205 |
-
|
206 |
-
:param bool raise_on_redirect: Whether, if the number of redirects is
|
207 |
-
exhausted, to raise a MaxRetryError, or to return a response with a
|
208 |
-
response code in the 3xx range.
|
209 |
-
|
210 |
-
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
|
211 |
-
whether we should raise an exception, or return a response,
|
212 |
-
if status falls in ``status_forcelist`` range and retries have
|
213 |
-
been exhausted.
|
214 |
-
|
215 |
-
:param tuple history: The history of the request encountered during
|
216 |
-
each call to :meth:`~Retry.increment`. The list is in the order
|
217 |
-
the requests occurred. Each list item is of class :class:`RequestHistory`.
|
218 |
-
|
219 |
-
:param bool respect_retry_after_header:
|
220 |
-
Whether to respect Retry-After header on status codes defined as
|
221 |
-
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
|
222 |
-
|
223 |
-
:param iterable remove_headers_on_redirect:
|
224 |
-
Sequence of headers to remove from the request when a response
|
225 |
-
indicating a redirect is returned before firing off the redirected
|
226 |
-
request.
|
227 |
-
"""
|
228 |
-
|
229 |
-
#: Default methods to be used for ``allowed_methods``
|
230 |
-
DEFAULT_ALLOWED_METHODS = frozenset(
|
231 |
-
["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
|
232 |
-
)
|
233 |
-
|
234 |
-
#: Default status codes to be used for ``status_forcelist``
|
235 |
-
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
|
236 |
-
|
237 |
-
#: Default headers to be used for ``remove_headers_on_redirect``
|
238 |
-
DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Authorization"])
|
239 |
-
|
240 |
-
#: Maximum backoff time.
|
241 |
-
DEFAULT_BACKOFF_MAX = 120
|
242 |
-
|
243 |
-
def __init__(
|
244 |
-
self,
|
245 |
-
total=10,
|
246 |
-
connect=None,
|
247 |
-
read=None,
|
248 |
-
redirect=None,
|
249 |
-
status=None,
|
250 |
-
other=None,
|
251 |
-
allowed_methods=_Default,
|
252 |
-
status_forcelist=None,
|
253 |
-
backoff_factor=0,
|
254 |
-
raise_on_redirect=True,
|
255 |
-
raise_on_status=True,
|
256 |
-
history=None,
|
257 |
-
respect_retry_after_header=True,
|
258 |
-
remove_headers_on_redirect=_Default,
|
259 |
-
# TODO: Deprecated, remove in v2.0
|
260 |
-
method_whitelist=_Default,
|
261 |
-
):
|
262 |
-
|
263 |
-
if method_whitelist is not _Default:
|
264 |
-
if allowed_methods is not _Default:
|
265 |
-
raise ValueError(
|
266 |
-
"Using both 'allowed_methods' and "
|
267 |
-
"'method_whitelist' together is not allowed. "
|
268 |
-
"Instead only use 'allowed_methods'"
|
269 |
-
)
|
270 |
-
warnings.warn(
|
271 |
-
"Using 'method_whitelist' with Retry is deprecated and "
|
272 |
-
"will be removed in v2.0. Use 'allowed_methods' instead",
|
273 |
-
DeprecationWarning,
|
274 |
-
stacklevel=2,
|
275 |
-
)
|
276 |
-
allowed_methods = method_whitelist
|
277 |
-
if allowed_methods is _Default:
|
278 |
-
allowed_methods = self.DEFAULT_ALLOWED_METHODS
|
279 |
-
if remove_headers_on_redirect is _Default:
|
280 |
-
remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
|
281 |
-
|
282 |
-
self.total = total
|
283 |
-
self.connect = connect
|
284 |
-
self.read = read
|
285 |
-
self.status = status
|
286 |
-
self.other = other
|
287 |
-
|
288 |
-
if redirect is False or total is False:
|
289 |
-
redirect = 0
|
290 |
-
raise_on_redirect = False
|
291 |
-
|
292 |
-
self.redirect = redirect
|
293 |
-
self.status_forcelist = status_forcelist or set()
|
294 |
-
self.allowed_methods = allowed_methods
|
295 |
-
self.backoff_factor = backoff_factor
|
296 |
-
self.raise_on_redirect = raise_on_redirect
|
297 |
-
self.raise_on_status = raise_on_status
|
298 |
-
self.history = history or tuple()
|
299 |
-
self.respect_retry_after_header = respect_retry_after_header
|
300 |
-
self.remove_headers_on_redirect = frozenset(
|
301 |
-
[h.lower() for h in remove_headers_on_redirect]
|
302 |
-
)
|
303 |
-
|
304 |
-
def new(self, **kw):
|
305 |
-
params = dict(
|
306 |
-
total=self.total,
|
307 |
-
connect=self.connect,
|
308 |
-
read=self.read,
|
309 |
-
redirect=self.redirect,
|
310 |
-
status=self.status,
|
311 |
-
other=self.other,
|
312 |
-
status_forcelist=self.status_forcelist,
|
313 |
-
backoff_factor=self.backoff_factor,
|
314 |
-
raise_on_redirect=self.raise_on_redirect,
|
315 |
-
raise_on_status=self.raise_on_status,
|
316 |
-
history=self.history,
|
317 |
-
remove_headers_on_redirect=self.remove_headers_on_redirect,
|
318 |
-
respect_retry_after_header=self.respect_retry_after_header,
|
319 |
-
)
|
320 |
-
|
321 |
-
# TODO: If already given in **kw we use what's given to us
|
322 |
-
# If not given we need to figure out what to pass. We decide
|
323 |
-
# based on whether our class has the 'method_whitelist' property
|
324 |
-
# and if so we pass the deprecated 'method_whitelist' otherwise
|
325 |
-
# we use 'allowed_methods'. Remove in v2.0
|
326 |
-
if "method_whitelist" not in kw and "allowed_methods" not in kw:
|
327 |
-
if "method_whitelist" in self.__dict__:
|
328 |
-
warnings.warn(
|
329 |
-
"Using 'method_whitelist' with Retry is deprecated and "
|
330 |
-
"will be removed in v2.0. Use 'allowed_methods' instead",
|
331 |
-
DeprecationWarning,
|
332 |
-
)
|
333 |
-
params["method_whitelist"] = self.allowed_methods
|
334 |
-
else:
|
335 |
-
params["allowed_methods"] = self.allowed_methods
|
336 |
-
|
337 |
-
params.update(kw)
|
338 |
-
return type(self)(**params)
|
339 |
-
|
340 |
-
@classmethod
|
341 |
-
def from_int(cls, retries, redirect=True, default=None):
|
342 |
-
"""Backwards-compatibility for the old retries format."""
|
343 |
-
if retries is None:
|
344 |
-
retries = default if default is not None else cls.DEFAULT
|
345 |
-
|
346 |
-
if isinstance(retries, Retry):
|
347 |
-
return retries
|
348 |
-
|
349 |
-
redirect = bool(redirect) and None
|
350 |
-
new_retries = cls(retries, redirect=redirect)
|
351 |
-
log.debug("Converted retries value: %r -> %r", retries, new_retries)
|
352 |
-
return new_retries
|
353 |
-
|
354 |
-
def get_backoff_time(self):
|
355 |
-
"""Formula for computing the current backoff
|
356 |
-
|
357 |
-
:rtype: float
|
358 |
-
"""
|
359 |
-
# We want to consider only the last consecutive errors sequence (Ignore redirects).
|
360 |
-
consecutive_errors_len = len(
|
361 |
-
list(
|
362 |
-
takewhile(lambda x: x.redirect_location is None, reversed(self.history))
|
363 |
-
)
|
364 |
-
)
|
365 |
-
if consecutive_errors_len <= 1:
|
366 |
-
return 0
|
367 |
-
|
368 |
-
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
|
369 |
-
return min(self.DEFAULT_BACKOFF_MAX, backoff_value)
|
370 |
-
|
371 |
-
def parse_retry_after(self, retry_after):
|
372 |
-
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
|
373 |
-
if re.match(r"^\s*[0-9]+\s*$", retry_after):
|
374 |
-
seconds = int(retry_after)
|
375 |
-
else:
|
376 |
-
retry_date_tuple = email.utils.parsedate_tz(retry_after)
|
377 |
-
if retry_date_tuple is None:
|
378 |
-
raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
|
379 |
-
if retry_date_tuple[9] is None: # Python 2
|
380 |
-
# Assume UTC if no timezone was specified
|
381 |
-
# On Python2.7, parsedate_tz returns None for a timezone offset
|
382 |
-
# instead of 0 if no timezone is given, where mktime_tz treats
|
383 |
-
# a None timezone offset as local time.
|
384 |
-
retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]
|
385 |
-
|
386 |
-
retry_date = email.utils.mktime_tz(retry_date_tuple)
|
387 |
-
seconds = retry_date - time.time()
|
388 |
-
|
389 |
-
if seconds < 0:
|
390 |
-
seconds = 0
|
391 |
-
|
392 |
-
return seconds
|
393 |
-
|
394 |
-
def get_retry_after(self, response):
|
395 |
-
"""Get the value of Retry-After in seconds."""
|
396 |
-
|
397 |
-
retry_after = response.headers.get("Retry-After")
|
398 |
-
|
399 |
-
if retry_after is None:
|
400 |
-
return None
|
401 |
-
|
402 |
-
return self.parse_retry_after(retry_after)
|
403 |
-
|
404 |
-
def sleep_for_retry(self, response=None):
|
405 |
-
retry_after = self.get_retry_after(response)
|
406 |
-
if retry_after:
|
407 |
-
time.sleep(retry_after)
|
408 |
-
return True
|
409 |
-
|
410 |
-
return False
|
411 |
-
|
412 |
-
def _sleep_backoff(self):
|
413 |
-
backoff = self.get_backoff_time()
|
414 |
-
if backoff <= 0:
|
415 |
-
return
|
416 |
-
time.sleep(backoff)
|
417 |
-
|
418 |
-
def sleep(self, response=None):
|
419 |
-
"""Sleep between retry attempts.
|
420 |
-
|
421 |
-
This method will respect a server's ``Retry-After`` response header
|
422 |
-
and sleep the duration of the time requested. If that is not present, it
|
423 |
-
will use an exponential backoff. By default, the backoff factor is 0 and
|
424 |
-
this method will return immediately.
|
425 |
-
"""
|
426 |
-
|
427 |
-
if self.respect_retry_after_header and response:
|
428 |
-
slept = self.sleep_for_retry(response)
|
429 |
-
if slept:
|
430 |
-
return
|
431 |
-
|
432 |
-
self._sleep_backoff()
|
433 |
-
|
434 |
-
def _is_connection_error(self, err):
|
435 |
-
"""Errors when we're fairly sure that the server did not receive the
|
436 |
-
request, so it should be safe to retry.
|
437 |
-
"""
|
438 |
-
if isinstance(err, ProxyError):
|
439 |
-
err = err.original_error
|
440 |
-
return isinstance(err, ConnectTimeoutError)
|
441 |
-
|
442 |
-
def _is_read_error(self, err):
|
443 |
-
"""Errors that occur after the request has been started, so we should
|
444 |
-
assume that the server began processing it.
|
445 |
-
"""
|
446 |
-
return isinstance(err, (ReadTimeoutError, ProtocolError))
|
447 |
-
|
448 |
-
def _is_method_retryable(self, method):
|
449 |
-
"""Checks if a given HTTP method should be retried upon, depending if
|
450 |
-
it is included in the allowed_methods
|
451 |
-
"""
|
452 |
-
# TODO: For now favor if the Retry implementation sets its own method_whitelist
|
453 |
-
# property outside of our constructor to avoid breaking custom implementations.
|
454 |
-
if "method_whitelist" in self.__dict__:
|
455 |
-
warnings.warn(
|
456 |
-
"Using 'method_whitelist' with Retry is deprecated and "
|
457 |
-
"will be removed in v2.0. Use 'allowed_methods' instead",
|
458 |
-
DeprecationWarning,
|
459 |
-
)
|
460 |
-
allowed_methods = self.method_whitelist
|
461 |
-
else:
|
462 |
-
allowed_methods = self.allowed_methods
|
463 |
-
|
464 |
-
if allowed_methods and method.upper() not in allowed_methods:
|
465 |
-
return False
|
466 |
-
return True
|
467 |
-
|
468 |
-
def is_retry(self, method, status_code, has_retry_after=False):
|
469 |
-
"""Is this method/status code retryable? (Based on allowlists and control
|
470 |
-
variables such as the number of total retries to allow, whether to
|
471 |
-
respect the Retry-After header, whether this header is present, and
|
472 |
-
whether the returned status code is on the list of status codes to
|
473 |
-
be retried upon on the presence of the aforementioned header)
|
474 |
-
"""
|
475 |
-
if not self._is_method_retryable(method):
|
476 |
-
return False
|
477 |
-
|
478 |
-
if self.status_forcelist and status_code in self.status_forcelist:
|
479 |
-
return True
|
480 |
-
|
481 |
-
return (
|
482 |
-
self.total
|
483 |
-
and self.respect_retry_after_header
|
484 |
-
and has_retry_after
|
485 |
-
and (status_code in self.RETRY_AFTER_STATUS_CODES)
|
486 |
-
)
|
487 |
-
|
488 |
-
def is_exhausted(self):
|
489 |
-
"""Are we out of retries?"""
|
490 |
-
retry_counts = (
|
491 |
-
self.total,
|
492 |
-
self.connect,
|
493 |
-
self.read,
|
494 |
-
self.redirect,
|
495 |
-
self.status,
|
496 |
-
self.other,
|
497 |
-
)
|
498 |
-
retry_counts = list(filter(None, retry_counts))
|
499 |
-
if not retry_counts:
|
500 |
-
return False
|
501 |
-
|
502 |
-
return min(retry_counts) < 0
|
503 |
-
|
504 |
-
def increment(
|
505 |
-
self,
|
506 |
-
method=None,
|
507 |
-
url=None,
|
508 |
-
response=None,
|
509 |
-
error=None,
|
510 |
-
_pool=None,
|
511 |
-
_stacktrace=None,
|
512 |
-
):
|
513 |
-
"""Return a new Retry object with incremented retry counters.
|
514 |
-
|
515 |
-
:param response: A response object, or None, if the server did not
|
516 |
-
return a response.
|
517 |
-
:type response: :class:`~urllib3.response.HTTPResponse`
|
518 |
-
:param Exception error: An error encountered during the request, or
|
519 |
-
None if the response was received successfully.
|
520 |
-
|
521 |
-
:return: A new ``Retry`` object.
|
522 |
-
"""
|
523 |
-
if self.total is False and error:
|
524 |
-
# Disabled, indicate to re-raise the error.
|
525 |
-
raise six.reraise(type(error), error, _stacktrace)
|
526 |
-
|
527 |
-
total = self.total
|
528 |
-
if total is not None:
|
529 |
-
total -= 1
|
530 |
-
|
531 |
-
connect = self.connect
|
532 |
-
read = self.read
|
533 |
-
redirect = self.redirect
|
534 |
-
status_count = self.status
|
535 |
-
other = self.other
|
536 |
-
cause = "unknown"
|
537 |
-
status = None
|
538 |
-
redirect_location = None
|
539 |
-
|
540 |
-
if error and self._is_connection_error(error):
|
541 |
-
# Connect retry?
|
542 |
-
if connect is False:
|
543 |
-
raise six.reraise(type(error), error, _stacktrace)
|
544 |
-
elif connect is not None:
|
545 |
-
connect -= 1
|
546 |
-
|
547 |
-
elif error and self._is_read_error(error):
|
548 |
-
# Read retry?
|
549 |
-
if read is False or not self._is_method_retryable(method):
|
550 |
-
raise six.reraise(type(error), error, _stacktrace)
|
551 |
-
elif read is not None:
|
552 |
-
read -= 1
|
553 |
-
|
554 |
-
elif error:
|
555 |
-
# Other retry?
|
556 |
-
if other is not None:
|
557 |
-
other -= 1
|
558 |
-
|
559 |
-
elif response and response.get_redirect_location():
|
560 |
-
# Redirect retry?
|
561 |
-
if redirect is not None:
|
562 |
-
redirect -= 1
|
563 |
-
cause = "too many redirects"
|
564 |
-
redirect_location = response.get_redirect_location()
|
565 |
-
status = response.status
|
566 |
-
|
567 |
-
else:
|
568 |
-
# Incrementing because of a server error like a 500 in
|
569 |
-
# status_forcelist and the given method is in the allowed_methods
|
570 |
-
cause = ResponseError.GENERIC_ERROR
|
571 |
-
if response and response.status:
|
572 |
-
if status_count is not None:
|
573 |
-
status_count -= 1
|
574 |
-
cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
|
575 |
-
status = response.status
|
576 |
-
|
577 |
-
history = self.history + (
|
578 |
-
RequestHistory(method, url, error, status, redirect_location),
|
579 |
-
)
|
580 |
-
|
581 |
-
new_retry = self.new(
|
582 |
-
total=total,
|
583 |
-
connect=connect,
|
584 |
-
read=read,
|
585 |
-
redirect=redirect,
|
586 |
-
status=status_count,
|
587 |
-
other=other,
|
588 |
-
history=history,
|
589 |
-
)
|
590 |
-
|
591 |
-
if new_retry.is_exhausted():
|
592 |
-
raise MaxRetryError(_pool, url, error or ResponseError(cause))
|
593 |
-
|
594 |
-
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
|
595 |
-
|
596 |
-
return new_retry
|
597 |
-
|
598 |
-
def __repr__(self):
|
599 |
-
return (
|
600 |
-
"{cls.__name__}(total={self.total}, connect={self.connect}, "
|
601 |
-
"read={self.read}, redirect={self.redirect}, status={self.status})"
|
602 |
-
).format(cls=type(self), self=self)
|
603 |
-
|
604 |
-
def __getattr__(self, item):
|
605 |
-
if item == "method_whitelist":
|
606 |
-
# TODO: Remove this deprecated alias in v2.0
|
607 |
-
warnings.warn(
|
608 |
-
"Using 'method_whitelist' with Retry is deprecated and "
|
609 |
-
"will be removed in v2.0. Use 'allowed_methods' instead",
|
610 |
-
DeprecationWarning,
|
611 |
-
)
|
612 |
-
return self.allowed_methods
|
613 |
-
try:
|
614 |
-
return getattr(super(Retry, self), item)
|
615 |
-
except AttributeError:
|
616 |
-
return getattr(Retry, item)
|
617 |
-
|
618 |
-
|
619 |
-
# For backwards compatibility (equivalent to pre-v1.9):
|
620 |
-
Retry.DEFAULT = Retry(3)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Billius/VizLib-TopLargeHospitalsNewJersey-04-07-2023/app.py
DELETED
@@ -1,58 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import graphviz as gv
|
3 |
-
from graphviz import Graph
|
4 |
-
import folium
|
5 |
-
from streamlit_folium import folium_static
|
6 |
-
|
7 |
-
# Define the cluster relations graph using gvmap
|
8 |
-
g = Graph(format='svg')
|
9 |
-
g.graph_attr['bgcolor'] = '#FFFFFF'
|
10 |
-
g.graph_attr['outputorder'] = 'edgesfirst'
|
11 |
-
g.graph_attr['size'] = '10,10'
|
12 |
-
g.node_attr['style'] = 'filled'
|
13 |
-
g.node_attr['shape'] = 'box'
|
14 |
-
g.node_attr['fillcolor'] = '#FFDAB9'
|
15 |
-
|
16 |
-
with g.subgraph(name='cluster_NJ') as c:
|
17 |
-
c.graph_attr['bgcolor'] = '#ADD8E6'
|
18 |
-
c.node_attr['color'] = '#000000'
|
19 |
-
c.node_attr['fontcolor'] = '#000000'
|
20 |
-
c.attr(label='New Jersey', fontsize='24')
|
21 |
-
c.node('Hackensack Meridian Health', URL='https://www.hackensackmeridianhealth.org/', target='_blank', tooltip='Hackensack Meridian Health: Hackensack University Medical Center')
|
22 |
-
c.node('RWJBarnabas Health', URL='https://www.rwjbh.org/', target='_blank', tooltip='RWJBarnabas Health: Robert Wood Johnson University Hospital')
|
23 |
-
c.node('Atlantic Health System', URL='https://www.atlantichealth.org/', target='_blank', tooltip='Atlantic Health System: Morristown Medical Center')
|
24 |
-
c.node('Virtua Health', URL='https://www.virtua.org/', target='_blank', tooltip='Virtua Health: Virtua Memorial Hospital')
|
25 |
-
c.node('Inspira Health', URL='https://www.inspirahealthnetwork.org/', target='_blank', tooltip='Inspira Health: Inspira Medical Center Vineland')
|
26 |
-
c.node('Cooper University Health Care', URL='https://www.cooperhealth.org/', target='_blank', tooltip='Cooper University Health Care: Cooper University Hospital')
|
27 |
-
c.node('University Hospital', URL='https://www.uhnj.org/', target='_blank', tooltip='University Hospital: University Hospital')
|
28 |
-
c.node('Robert Wood Johnson University Hospital Hamilton', URL='https://www.rwjbh.org/robert-wood-johnson-university-hospital-hamilton/', target='_blank', tooltip='Robert Wood Johnson University Hospital Hamilton: Robert Wood Johnson University Hospital Hamilton')
|
29 |
-
c.node('Trinitas Regional Medical Center', URL='https://www.trinitasrmc.org/', target='_blank', tooltip='Trinitas Regional Medical Center: Trinitas Regional Medical Center')
|
30 |
-
c.node('Capital Health Regional Medical Center', URL='https://www.capitalhealth.org/', target='_blank', tooltip='Capital Health Regional Medical Center: Capital Health Regional Medical Center')
|
31 |
-
|
32 |
-
# Render the graph using streamlit
|
33 |
-
st.graphviz_chart(g)
|
34 |
-
|
35 |
-
# Define hospitals data
|
36 |
-
hospitals = [('Hackensack Meridian Health', 'Hackensack University Medical Center', 40.899886, -74.039179),
|
37 |
-
('RWJBarnabas Health', 'Robert Wood Johnson University Hospital', 40.491301, -74.450611),
|
38 |
-
('Atlantic Health System', 'Morristown Medical Center', 40.787231, -74.473851),
|
39 |
-
('Virtua Health', 'Virtua Memorial Hospital', 39.931229, -75.025831),
|
40 |
-
('Inspira Health', 'Inspira Medical Center Vineland', 39.460225, -75.035542),
|
41 |
-
('Cooper University Health Care', 'Cooper University Hospital', 39.942743, -75.119090),
|
42 |
-
('University Hospital', 'University Hospital', 40.742310, -74.177609),
|
43 |
-
('Robert Wood Johnson University Hospital Hamilton', 'Robert Wood Johnson University Hospital Hamilton', 40.214008, -74.679619),
|
44 |
-
('Trinitas Regional Medical Center', 'Trinitas Regional Medical Center', 40.661474, -74.215013),
|
45 |
-
('Capital Health Regional Medical Center', 'Capital Health Regional Medical Center', 40.266778, -74.796452)]
|
46 |
-
|
47 |
-
#Create a map centered on New Jersey
|
48 |
-
m = folium.Map(location=[40.0583, -74.4057], zoom_start=8)
|
49 |
-
|
50 |
-
#Add markers for each hospital
|
51 |
-
for hospital in hospitals:
|
52 |
-
folium.Marker(
|
53 |
-
location=[hospital[2], hospital[3]],
|
54 |
-
popup=f'{hospital[1]}<br>{hospital[2]},{hospital[3]}'
|
55 |
-
).add_to(m)
|
56 |
-
|
57 |
-
#Display the map in Streamlit
|
58 |
-
folium_static(m)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CAMP-ViL/Xplainer/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Xplainer
|
3 |
-
emoji: 📊
|
4 |
-
colorFrom: yellow
|
5 |
-
colorTo: yellow
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.34.0
|
8 |
-
python_version: 3.7.16
|
9 |
-
app_file: app.py
|
10 |
-
pinned: false
|
11 |
-
license: mit
|
12 |
-
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/pybind11/tests/test_docstring_options.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
from pybind11_tests import docstring_options as m
|
3 |
-
|
4 |
-
|
5 |
-
def test_docstring_options():
|
6 |
-
# options.disable_function_signatures()
|
7 |
-
assert not m.test_function1.__doc__
|
8 |
-
|
9 |
-
assert m.test_function2.__doc__ == "A custom docstring"
|
10 |
-
|
11 |
-
# docstring specified on just the first overload definition:
|
12 |
-
assert m.test_overloaded1.__doc__ == "Overload docstring"
|
13 |
-
|
14 |
-
# docstring on both overloads:
|
15 |
-
assert m.test_overloaded2.__doc__ == "overload docstring 1\noverload docstring 2"
|
16 |
-
|
17 |
-
# docstring on only second overload:
|
18 |
-
assert m.test_overloaded3.__doc__ == "Overload docstr"
|
19 |
-
|
20 |
-
# options.enable_function_signatures()
|
21 |
-
assert m.test_function3.__doc__ .startswith("test_function3(a: int, b: int) -> None")
|
22 |
-
|
23 |
-
assert m.test_function4.__doc__ .startswith("test_function4(a: int, b: int) -> None")
|
24 |
-
assert m.test_function4.__doc__ .endswith("A custom docstring\n")
|
25 |
-
|
26 |
-
# options.disable_function_signatures()
|
27 |
-
# options.disable_user_defined_docstrings()
|
28 |
-
assert not m.test_function5.__doc__
|
29 |
-
|
30 |
-
# nested options.enable_user_defined_docstrings()
|
31 |
-
assert m.test_function6.__doc__ == "A custom docstring"
|
32 |
-
|
33 |
-
# RAII destructor
|
34 |
-
assert m.test_function7.__doc__ .startswith("test_function7(a: int, b: int) -> None")
|
35 |
-
assert m.test_function7.__doc__ .endswith("A custom docstring\n")
|
36 |
-
|
37 |
-
# Suppression of user-defined docstrings for non-function objects
|
38 |
-
assert not m.DocstringTestFoo.__doc__
|
39 |
-
assert not m.DocstringTestFoo.value_prop.__doc__
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/swap_ranges.h
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// omp inherits swap_ranges
|
22 |
-
#include <thrust/system/cpp/detail/swap_ranges.h>
|
23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/TokenCut/app_backup.py
DELETED
@@ -1,43 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import requests
|
3 |
-
import pandas as pd
|
4 |
-
import gradio as gr
|
5 |
-
from huggingface_hub.hf_api import SpaceInfo
|
6 |
-
from pathlib import Path
|
7 |
-
|
8 |
-
|
9 |
-
path = f"https://huggingface.co/api/spaces"
|
10 |
-
os.system("git clone https://github.com/YangtaoWANG95/TokenCut.git")
|
11 |
-
os.chdir("TokenCut")
|
12 |
-
os.system("wget https://raw.githubusercontent.com/YangtaoWANG95/TokenCut/master/examples/VOC07_000064.jpg -O parrot.jpg")
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
def get_blocks_party_spaces():
|
17 |
-
r = requests.get(path)
|
18 |
-
d = r.json()
|
19 |
-
spaces = [SpaceInfo(**x) for x in d]
|
20 |
-
blocks_spaces = {}
|
21 |
-
for i in range(0,len(spaces)):
|
22 |
-
if spaces[i].id.split('/')[0] == 'CVPR' and hasattr(spaces[i], 'likes') and spaces[i].id != 'CVPR/Leaderboard' and spaces[i].id != 'CVPR/README':
|
23 |
-
blocks_spaces[spaces[i].id]=spaces[i].likes
|
24 |
-
df = pd.DataFrame(
|
25 |
-
[{"Spaces_Name": Spaces, "likes": likes} for Spaces,likes in blocks_spaces.items()])
|
26 |
-
df = df.sort_values(by=['likes'],ascending=False)
|
27 |
-
return df
|
28 |
-
|
29 |
-
|
30 |
-
block = gr.Blocks()
|
31 |
-
|
32 |
-
with block:
|
33 |
-
gr.Markdown("""Leaderboard for the most popular CVPR Spaces. To learn more and join, see <a href="https://huggingface.co/CVPR" target="_blank" style="text-decoration: underline">CVPR Event</a>""")
|
34 |
-
with gr.Tabs():
|
35 |
-
with gr.TabItem("CVPR Leaderboard"):
|
36 |
-
with gr.Row():
|
37 |
-
data = gr.outputs.Dataframe(type="pandas")
|
38 |
-
with gr.Row():
|
39 |
-
data_run = gr.Button("Refresh")
|
40 |
-
data_run.click(get_blocks_party_spaces, inputs=None, outputs=data)
|
41 |
-
|
42 |
-
block.load(get_blocks_party_spaces, inputs=None, outputs=data)
|
43 |
-
block.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/ml-talking-face/toxicity_estimator/__init__.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
from .module import PerspectiveAPI
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/projects/__init__.py
DELETED
@@ -1,31 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import importlib
|
3 |
-
from pathlib import Path
|
4 |
-
|
5 |
-
_PROJECTS = {
|
6 |
-
"point_rend": "PointRend",
|
7 |
-
"deeplab": "DeepLab",
|
8 |
-
"panoptic_deeplab": "Panoptic-DeepLab",
|
9 |
-
}
|
10 |
-
_PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent / "projects"
|
11 |
-
|
12 |
-
if _PROJECT_ROOT.is_dir():
|
13 |
-
# This is true only for in-place installation (pip install -e, setup.py develop),
|
14 |
-
# where setup(package_dir=) does not work: https://github.com/pypa/setuptools/issues/230
|
15 |
-
|
16 |
-
class _D2ProjectsFinder(importlib.abc.MetaPathFinder):
|
17 |
-
def find_spec(self, name, path, target=None):
|
18 |
-
if not name.startswith("detectron2.projects."):
|
19 |
-
return
|
20 |
-
project_name = name.split(".")[-1]
|
21 |
-
project_dir = _PROJECTS.get(project_name)
|
22 |
-
if not project_dir:
|
23 |
-
return
|
24 |
-
target_file = _PROJECT_ROOT / f"{project_dir}/{project_name}/__init__.py"
|
25 |
-
if not target_file.is_file():
|
26 |
-
return
|
27 |
-
return importlib.util.spec_from_file_location(name, target_file)
|
28 |
-
|
29 |
-
import sys
|
30 |
-
|
31 |
-
sys.meta_path.append(_D2ProjectsFinder())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|