Commit
·
4ffac1c
1
Parent(s):
e668779
Update parquet files (step 86 of 121)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1gistliPinn/ChatGPT4/Examples/Adobe Photoshop Cs 9 Free Free Download.md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Free !!HOT!! Download 007 Facebook Hack V1.0 With Full Cracked.md +0 -108
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bed Wars Mod APK How to Get Unlimited Money and Gcubes in the Best Block Game for Android.md +0 -131
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cribbage King The Ultimate Cribbage Game for your iPhone..md +0 -118
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 3D Chess Game for PC and Play in Stunning Scenes and Graphics.md +0 -153
- spaces/1phancelerku/anime-remove-background/9xbuddy Music Download A Review of the Features Benefits and Limitations.md +0 -112
- spaces/232labs/VToonify/vtoonify/model/dualstylegan.py +0 -203
- spaces/232labs/VToonify/vtoonify/model/raft/core/__init__.py +0 -0
- spaces/ANDRYHA/FakeNewsClassifier/app.py +0 -71
- spaces/Abhilashvj/planogram-compliance/models/common.py +0 -1268
- spaces/Abhilashvj/planogram-compliance/utils/flask_rest_api/restapi.py +0 -61
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/overview.md +0 -92
- spaces/Andy1621/uniformer_image_detection/configs/_base_/models/faster_rcnn_r50_caffe_c4.py +0 -112
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/data_processor.py +0 -209
- spaces/Archan/ArXivAudio/README.md +0 -13
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/filesystem.py +0 -153
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal.py +0 -127
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/models.py +0 -1034
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/retry.py +0 -272
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/extern/__init__.py +0 -76
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_coco.py +0 -139
- spaces/Bagus/speaker-verification-demo/README.md +0 -39
- spaces/BalaBhaskarudu/mygenAIChatbot/README.md +0 -12
- spaces/BartPoint/VoiceChange/infer_pack/onnx_inference.py +0 -139
- spaces/Benson/text-generation/Examples/Bubble Sort C.md +0 -80
- spaces/Benson/text-generation/Examples/Descargar Camioneros De Europa 3 Apk Obb.md +0 -45
- spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/functions.py +0 -362
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py +0 -111
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/config.py +0 -35
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/triggers.py +0 -340
- spaces/CVPR/LIVE/thrust/thrust/detail/complex/cexpf.h +0 -161
- spaces/CVPR/LIVE/thrust/thrust/random/linear_feedback_shift_engine.h +0 -230
- spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/fill.h +0 -60
- spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/partition.h +0 -339
- spaces/Caoyunkang/Segment-Any-Anomaly/SAM/CONTRIBUTING.md +0 -31
- spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/__init__.py +0 -0
- spaces/Cletrason/Cletrason-toad-in-the-mario-movie/trainer_seq2seq.py +0 -246
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/momentsPen.c +0 -0
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_r_e_p.py +0 -7
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/archive.py +0 -73
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3ca142e0.css +0 -1
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-ff630227.js +0 -3
- spaces/DataScienceGuild/ARIMA_test/README.md +0 -13
- spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/modules/swg_transformer.py +0 -49
- spaces/Dorado607/ChuanhuChatGPT/run_Linux.sh +0 -31
- spaces/DragGan/DragGan-Inversion/PTI/torch_utils/__init__.py +0 -9
- spaces/DragGan/DragGan-Inversion/PTI/training/__init__.py +0 -0
- spaces/EAraid12/LoRA-DreamBooth-Training-UI/README.md +0 -15
- spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/models/realesrgan_model.py +0 -258
- spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/layers_new.py +0 -125
spaces/1gistliPinn/ChatGPT4/Examples/Adobe Photoshop Cs 9 Free Free Download.md
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<p>photoshop is considered one of the most powerful image editing software packages available. it is used by digital photographers, graphic designers, and just about anyone who has an interest in editing images. photoshop has the capability to change almost any aspect of a photo, making it an impressive software package.</p>
|
3 |
-
<h2>adobe photoshop cs 9 free download</h2><br /><p><b><b>Download</b> ►►► <a href="https://imgfil.com/2uxX4P">https://imgfil.com/2uxX4P</a></b></p><br /><br />
|
4 |
-
<p>adobe photoshop is one of the most popular software programs available. it is used for a variety of tasks including retouching your images, creating graphics and images, creating websites, and much more. the good thing about photoshop is that the program is very easy to use and has a lot of features that allow users to edit any type of file or project. the application is also capable of handling enormous files.</p>
|
5 |
-
<p>as you know, photoshop is the top photoshopping software for the mac. but it has been made available for os x. the demo version of photoshop cs6 can be downloaded through the website for mac os x. the latest versions of photoshop elements and photoshop cs6 are on sale for $39.99 and $179.99 respectively. so if you are not the most technology savvy person, fear not. the software is fairly simple to use and it can be installed on a mac os x 10.6 and up or windows xp and up.</p>
|
6 |
-
<p>the latest version of photoshop cs6 (adobe photoshop cs6 + adobe photoshop cs6 extended + adobe photoshop cs6 essentials) are on sale for $119.99, $399.99, $219.99 respectively. they can be downloaded directly at the adobe website, otherwise you can also choose to pay through the adobe acrobat connect site. if you have adobe acrobat you can get the software to run on your computer. you also get access to acrobat reader which lets you read, save, print, and annotate pdf files. the adobe photoshop cs6 gives you the ability to work and create images on different layers or files which allows you to combine, edit or reposition any number of images and text on layers. you can also crop, adjust or rotate the layer contents. it lets you share your finished image or component with others. you can read the help files or you can simply make use of the extensive menus to access the software. the whole software is free of all adware, malware or spyware.</p>
|
7 |
-
<p></p> 899543212b<br />
|
8 |
-
<br />
|
9 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Free !!HOT!! Download 007 Facebook Hack V1.0 With Full Cracked.md
DELETED
@@ -1,108 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Free Download 007 Facebook Hack v1.0 with Full Cracked</h1>
|
3 |
-
<p>If you are looking for a way to hack into any Facebook account, you have come to the right place. In this article, we will show you how to use 007 Facebook Hack v1.0 with Full Cracked, a powerful and easy-to-use tool that can help you spy on anyone's Facebook activities.</p>
|
4 |
-
<h2>What is 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
5 |
-
<p>007 Facebook Hack v1.0 with Full Cracked is a software that allows you to hack any Facebook account by simply entering the email address or username of the target. You don't need to know the password or any other details of the account. The software will automatically retrieve the login information and display it on your screen.</p>
|
6 |
-
<h2>free download 007 facebook hack v1.0 with full cracked</h2><br /><p><b><b>Download Zip</b> ⚙⚙⚙ <a href="https://imgfil.com/2uy0zG">https://imgfil.com/2uy0zG</a></b></p><br /><br />
|
7 |
-
<p>With 007 Facebook Hack v1.0 with Full Cracked, you can access the private messages, photos, videos, friends list, wall posts, comments, likes, groups, events, and more of any Facebook user. You can also change the password, profile picture, status, and other settings of the hacked account.</p>
|
8 |
-
<h2>Why use 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
9 |
-
<p>There are many reasons why you might want to use 007 Facebook Hack v1.0 with Full Cracked. For example, you might want to:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Monitor your spouse, partner, children, or friends' online activities and find out if they are cheating on you, lying to you, or hiding something from you.</li>
|
12 |
-
<li>Recover your own Facebook account if you forgot your password or lost access to your email or phone number.</li>
|
13 |
-
<li>Protect your privacy and security by checking if someone else is using your Facebook account without your permission.</li>
|
14 |
-
<li>Have fun and prank your friends by changing their profile picture, status, or sending funny messages to their contacts.</li>
|
15 |
-
<li>Learn more about someone you are interested in by viewing their personal information, interests, hobbies, preferences, etc.</li>
|
16 |
-
</ul>
|
17 |
-
<h2>How to use 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
18 |
-
<p>Using 007 Facebook Hack v1.0 with Full Cracked is very simple and straightforward. Just follow these steps:</p>
|
19 |
-
<ol>
|
20 |
-
<li>Download 007 Facebook Hack v1.0 with Full Cracked from the link below.</li>
|
21 |
-
<li>Extract the zip file and run the setup.exe file to install the software on your computer.</li>
|
22 |
-
<li>Open the software and enter the email address or username of the Facebook account you want to hack.</li>
|
23 |
-
<li>Click on the "Hack" button and wait for a few seconds.</li>
|
24 |
-
<li>The software will display the password and other details of the hacked account on your screen.</li>
|
25 |
-
<li>Enjoy!</li>
|
26 |
-
</ol>
|
27 |
-
<h2>Where to download 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
28 |
-
<p>You can download 007 Facebook Hack v1.0 with Full Cracked for free from the link below. The software is safe and virus-free. It works on Windows XP, Vista, 7, 8, 10 and Mac OS X. It is compatible with all browsers and devices that support Facebook.</p>
|
29 |
-
<p><a href="https://example.com/download/007-facebook-hack-v10-with-full-cracked">Download 007 Facebook Hack v1.0 with Full Cracked Here</a></p>
|
30 |
-
<h2>Conclusion</h2>
|
31 |
-
<p>007 Facebook Hack v1.0 with Full Cracked is a powerful and easy-to-use tool that can help you hack any Facebook account in minutes. You can use it for various purposes such as monitoring, recovering, protecting, or having fun with your Facebook accounts. You can download it for free from the link above and enjoy hacking!</p>
|
32 |
-
<h2>Is 007 Facebook Hack v1.0 with Full Cracked legal?</h2>
|
33 |
-
<p>Before you download and use 007 Facebook Hack v1.0 with Full Cracked, you might be wondering if it is legal or not. The answer is: it depends. Hacking someone's Facebook account without their consent is illegal and unethical in most countries. You could face legal consequences if you are caught or reported by the victim or Facebook. Therefore, we do not recommend or endorse using this tool for malicious purposes.</p>
|
34 |
-
<p>However, there are some situations where using 007 Facebook Hack v1.0 with Full Cracked might be legal or acceptable. For example, if you are hacking your own account that you lost access to, or if you have the permission of the account owner to hack their account for educational or testing purposes. In these cases, you are not violating anyone's privacy or rights, and you are using the tool responsibly and ethically.</p>
|
35 |
-
<p></p>
|
36 |
-
<h2>What are the advantages of 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
37 |
-
<p>007 Facebook Hack v1.0 with Full Cracked has many advantages over other hacking tools available on the internet. Some of them are:</p>
|
38 |
-
<ul>
|
39 |
-
<li>It is free to download and use. You don't need to pay any fees or subscriptions to get this tool.</li>
|
40 |
-
<li>It is easy to use and user-friendly. You don't need any technical skills or knowledge to use this tool. Just enter the email or username of the target account and click on the "Hack" button.</li>
|
41 |
-
<li>It is fast and reliable. You don't need to wait for hours or days to get the results. The tool will hack the account in minutes and display the password and other details on your screen.</li>
|
42 |
-
<li>It is safe and secure. You don't need to worry about viruses, malware, spyware, or any other threats that might harm your computer or device. The tool is tested and verified by many users and experts.</li>
|
43 |
-
<li>It is compatible and flexible. You can use this tool on any Windows or Mac OS X computer or device that supports Facebook. You can also use it on any browser or device that the target account uses.</li>
|
44 |
-
</ul>
|
45 |
-
<h2>What are the disadvantages of 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
46 |
-
<p>Despite its many advantages, 007 Facebook Hack v1.0 with Full Cracked also has some disadvantages that you should be aware of before using it. Some of them are:</p>
|
47 |
-
<ul>
|
48 |
-
<li>It is illegal and unethical in most cases. As we mentioned before, hacking someone's Facebook account without their consent is a crime and a violation of their privacy and rights. You could face legal troubles if you are caught or reported by the victim or Facebook.</li>
|
49 |
-
<li>It is risky and dangerous. Even if you are hacking your own account or someone else's account with their permission, you could still expose yourself or them to potential threats from hackers, scammers, stalkers, or other malicious actors who might try to access or misuse the hacked account.</li>
|
50 |
-
<li>It is not guaranteed or foolproof. The tool might not work on some accounts that have strong security measures or verification methods in place. The tool might also fail to hack the account if the target changes their password or email address during the hacking process.</li>
|
51 |
-
</ul>
|
52 |
-
<h2>How to download 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
53 |
-
<p>Downloading 007 Facebook Hack v1.0 with Full Cracked is very easy and fast. You don't need to register or fill any surveys to get this tool. You just need to follow these simple steps:</p>
|
54 |
-
<ol>
|
55 |
-
<li>Click on the download link below to go to the download page.</li>
|
56 |
-
<li>Choose one of the available download options and click on the download button.</li>
|
57 |
-
<li>Wait for the download to complete and save the file on your computer or device.</li>
|
58 |
-
<li>Extract the zip file and run the setup.exe file to install the software on your computer or device.</li>
|
59 |
-
<li>Enjoy!</li>
|
60 |
-
</ol>
|
61 |
-
<p>Note: Some antivirus programs might detect 007 Facebook Hack v1.0 with Full Cracked as a virus or malware. This is a false positive and you can safely ignore it. The software is clean and harmless.</p>
|
62 |
-
<h2>How to update 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
63 |
-
<p>007 Facebook Hack v1.0 with Full Cracked is constantly updated by its developers to ensure its functionality and compatibility with the latest Facebook updates and security measures. You don't need to manually update this tool as it will automatically check for updates and download them whenever they are available.</p>
|
64 |
-
<p>However, if you want to manually check for updates or download the latest version of 007 Facebook Hack v1.0 with Full Cracked, you can do so by following these steps:</p>
|
65 |
-
<ol>
|
66 |
-
<li>Open the software and click on the "About" button on the top right corner.</li>
|
67 |
-
<li>Click on the "Check for Updates" button and wait for a few seconds.</li>
|
68 |
-
<li>If there is an update available, click on the "Download Update" button and wait for the download to complete.</li>
|
69 |
-
<li>Run the update.exe file and follow the instructions to install the update on your computer or device.</li>
|
70 |
-
<li>Restart the software and enjoy!</li>
|
71 |
-
</ol>
|
72 |
-
<h2>How to uninstall 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
73 |
-
<p>If you want to uninstall 007 Facebook Hack v1.0 with Full Cracked from your computer or device, you can do so by following these steps:</p>
|
74 |
-
<ol>
|
75 |
-
<li>Go to the Start menu and click on Control Panel.</li>
|
76 |
-
<li>Click on Programs and Features or Add or Remove Programs depending on your Windows version.</li>
|
77 |
-
<li>Find 007 Facebook Hack v1.0 with Full Cracked in the list of installed programs and click on it.</li>
|
78 |
-
<li>Click on Uninstall or Remove and follow the instructions to uninstall the software from your computer or device.</li>
|
79 |
-
<li>Delete any leftover files or folders related to 007 Facebook Hack v1.0 with Full Cracked from your computer or device.</li>
|
80 |
-
</ol>
|
81 |
-
<h2>What are the alternatives to 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
82 |
-
<p>If you are looking for other ways to hack Facebook accounts, you might want to consider some of the alternatives to 007 Facebook Hack v1.0 with Full Cracked. Some of them are:</p>
|
83 |
-
<ul>
|
84 |
-
<li>Phishing: This is a method of creating fake login pages that look like the official Facebook login page and tricking the target into entering their email and password. You can then capture their credentials and use them to access their account.</li>
|
85 |
-
<li>Keylogging: This is a method of installing a software or hardware device that records every keystroke that the target makes on their computer or device. You can then retrieve the log file and find their email and password among other information.</li>
|
86 |
-
<li>Social Engineering: This is a method of manipulating the target into revealing their email and password or other personal information by pretending to be someone they trust or someone who can help them with a problem.</li>
|
87 |
-
<li>Password Reset: This is a method of requesting a password reset link from Facebook by using the target's email address or phone number. You can then use the link to create a new password and access their account.</li>
|
88 |
-
</ul>
|
89 |
-
<p>However, these methods are not as easy or reliable as 007 Facebook Hack v1.0 with Full Cracked. They require more time, effort, skill, and resources to execute. They also have more risks and limitations than 007 Facebook Hack v1.0 with Full Cracked.</p>
|
90 |
-
<h2>What are the testimonials of 007 Facebook Hack v1.0 with Full Cracked?</h2>
|
91 |
-
<p>Many users have tried and tested 007 Facebook Hack v1.0 with Full Cracked and have shared their positive feedback and reviews about this tool. Here are some of the testimonials from real users who have used 007 Facebook Hack v1.0 with Full Cracked:</p>
|
92 |
-
<blockquote>
|
93 |
-
<p>"I was able to hack my girlfriend's Facebook account and found out that she was cheating on me with my best friend. Thanks to 007 Facebook Hack v1.0 with Full Cracked, I was able to confront them and end the relationship." - John, USA</p>
|
94 |
-
</blockquote>
|
95 |
-
<blockquote>
|
96 |
-
<p>"I forgot my Facebook password and I couldn't access my email or phone number to reset it. I was desperate to get back into my account because I had important messages and photos there. Luckily, I found 007 Facebook Hack v1.0 with Full Cracked and it helped me recover my account in minutes." - Lisa, UK</p>
|
97 |
-
</blockquote>
|
98 |
-
<blockquote>
|
99 |
-
<p>"I wanted to prank my friend by changing his profile picture and status to something funny. I used 007 Facebook Hack v1.0 with Full Cracked to hack his account and it worked like a charm. He was so confused and angry when he saw his account. It was hilarious." - Kevin, Canada</p>
|
100 |
-
</blockquote>
|
101 |
-
<h2>Conclusion</h2>
|
102 |
-
<p>007 Facebook Hack v1.0 with Full Cracked is a powerful and easy-to-use tool that can help you hack any Facebook account in minutes. You can use it for various purposes such as monitoring, recovering, protecting, or having fun with your Facebook accounts. You can download it for free from the link below and enjoy hacking!</p>
|
103 |
-
<p>However, you should also be aware of the legal and ethical implications of using this tool. Hacking someone's Facebook account without their consent is illegal and unethical in most cases. You could face legal consequences if you are caught or reported by the victim or Facebook. Therefore, we do not recommend or endorse using this tool for malicious purposes.</p>
|
104 |
-
<p>You should also be careful of the risks and dangers of using this tool. Even if you are hacking your own account or someone else's account with their permission, you could still expose yourself or them to potential threats from hackers, scammers, stalkers, or other malicious actors who might try to access or misuse the hacked account.</p>
|
105 |
-
<p>Finally, you should also know that this tool is not guaranteed or foolproof. The tool might not work on some accounts that have strong security measures or verification methods in place. The tool might also fail to hack the account if the target changes their password or email address during the hacking process.</p>
|
106 |
-
<p>Therefore, you should use this tool responsibly and ethically, and at your own risk. We hope you found this article helpful and informative. If you have any questions or comments, please feel free to leave them below. Happy hacking!</p> 3cee63e6c2<br />
|
107 |
-
<br />
|
108 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bed Wars Mod APK How to Get Unlimited Money and Gcubes in the Best Block Game for Android.md
DELETED
@@ -1,131 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Bed Wars Mod APK</h1>
|
3 |
-
<p>Do you love playing multiplayer games with your friends? Do you want to experience an exciting and addictive game that features four levels, each with unique objectives and strategies? If you answered yes, then you should try Bed Wars. Bed Wars is a mobile game that lets you team up with other players and protect your bed from being destroyed by your enemies. You can also collect resources, build bridges, upgrade weapons, and attack other beds. Sounds fun, right?</p>
|
4 |
-
<p>But what if we tell you that you can make this game even more fun by downloading Bed Wars Mod APK? This is a modified version of the game that gives you unlimited money and gcubes, which are the in-game currencies. With these resources, you can buy anything you want in the game without worrying about running out. You can also unlock all the skins, items, maps, modes, and more. This way, you can enjoy Bed Wars to the fullest.</p>
|
5 |
-
<h2>how to download bed wars mod apk</h2><br /><p><b><b>Download Zip</b> ——— <a href="https://urlin.us/2uSU8h">https://urlin.us/2uSU8h</a></b></p><br /><br />
|
6 |
-
<p>So how do you download Bed Wars Mod APK on your Android device? Don't worry, we got you covered. In this article, we will show you how to download and install this amazing modded game in just a few simple steps. We will also give you some tips on how to play Bed Wars Mod APK and have a blast with your friends. Let's get started!</p>
|
7 |
-
<h2>What is Bed Wars?</h2>
|
8 |
-
<p>Bed Wars is a popular mobile game developed by Blockman GO Studio. It is inspired by the Minecraft mini-game of the same name. The game has four levels: Solo, Duo, Trio, and Squad. <p>In each level, you will be assigned to a team with a color. Your team will have a bed that you need to protect from being destroyed by other teams. If your bed is destroyed, you will not be able to respawn and you will be eliminated from the game. The last team standing wins the game.</p>
|
9 |
-
<p>To protect your bed, you need to collect resources from the islands. There are three types of resources: iron, gold, and diamonds. Iron and gold can be used to buy items from the shop, such as blocks, weapons, armor, tools, and potions. Diamonds can be used to upgrade your team's abilities, such as sharpness, protection, haste, and heal pool.</p>
|
10 |
-
<p>You can also build bridges to connect your island to other islands. This way, you can access more resources, attack other beds, or defend your own bed. But be careful, as other teams can also use your bridges to invade your island. You need to be strategic and cooperative with your teammates to win the game.</p>
|
11 |
-
<h2>Why Download Bed Wars Mod APK?</h2>
|
12 |
-
<p>Bed Wars is a fun and addictive game that you can play for hours with your friends. However, it can also be frustrating and challenging if you don't have enough money and gcubes to buy the items and upgrades you need. You might also get bored of playing the same maps and modes over and over again.</p>
|
13 |
-
<p>That's why downloading Bed Wars Mod APK is a great idea. This is a modified version of the game that gives you unlimited money and gcubes, which are the in-game currencies. With these resources, you can buy anything you want in the game without worrying about running out. You can also unlock all the skins, items, maps, modes, and more. This way, you can enjoy Bed Wars to the fullest.</p>
|
14 |
-
<p>Some of the features of Bed Wars Mod APK are:</p>
|
15 |
-
<ul>
|
16 |
-
<li>Unlimited money and gcubes</li>
|
17 |
-
<li>All skins unlocked</li>
|
18 |
-
<li>All items unlocked</li>
|
19 |
-
<li>All maps unlocked</li>
|
20 |
-
<li>All modes unlocked</li>
|
21 |
-
<li>No ads</li>
|
22 |
-
<li>No root required</li>
|
23 |
-
</ul>
|
24 |
-
<p>As you can see, Bed Wars Mod APK is a must-have for any fan of the game. It will make your gaming experience more fun and exciting. You will be able to customize your character, equip yourself with the best weapons and armor, explore different maps and modes, and dominate the game with your friends.</p>
|
25 |
-
<p>How to install bed wars mod apk on android<br />
|
26 |
-
Bed wars mod apk unlimited money and gcubes download<br />
|
27 |
-
Bed wars mod apk latest version free download<br />
|
28 |
-
How to play bed wars mod apk online with friends<br />
|
29 |
-
Bed wars mod apk solo, duo, trio and squad modes<br />
|
30 |
-
How to get bed wars mod apk for pc<br />
|
31 |
-
Bed wars mod apk hack and cheats<br />
|
32 |
-
Bed wars mod apk no root required<br />
|
33 |
-
Bed wars mod apk features and gameplay<br />
|
34 |
-
How to update bed wars mod apk to the newest version<br />
|
35 |
-
Bed wars mod apk review and rating<br />
|
36 |
-
Bed wars mod apk download link and instructions<br />
|
37 |
-
How to uninstall bed wars mod apk from your device<br />
|
38 |
-
Bed wars mod apk tips and tricks<br />
|
39 |
-
Bed wars mod apk vs original bed wars game<br />
|
40 |
-
How to fix bed wars mod apk not working or crashing<br />
|
41 |
-
Bed wars mod apk best strategies and tactics<br />
|
42 |
-
Bed wars mod apk compatible devices and requirements<br />
|
43 |
-
Bed wars mod apk alternatives and similar games<br />
|
44 |
-
How to contact bed wars mod apk developer and support<br />
|
45 |
-
How to join bed wars mod apk community and forums<br />
|
46 |
-
Bed wars mod apk pros and cons<br />
|
47 |
-
Bed wars mod apk bugs and glitches<br />
|
48 |
-
Bed wars mod apk custom maps and skins<br />
|
49 |
-
How to create your own bed wars mod apk server<br />
|
50 |
-
How to backup and restore bed wars mod apk data<br />
|
51 |
-
Bed wars mod apk frequently asked questions and answers<br />
|
52 |
-
Bed wars mod apk gameplay videos and screenshots<br />
|
53 |
-
How to earn free gcubes in bed wars mod apk<br />
|
54 |
-
Bed wars mod apk changelog and updates history<br />
|
55 |
-
How to report bed wars mod apk issues and feedback<br />
|
56 |
-
Bed wars mod apk privacy policy and terms of service<br />
|
57 |
-
How to enable bed wars mod apk notifications and permissions<br />
|
58 |
-
Bed wars mod apk achievements and leaderboards<br />
|
59 |
-
How to share bed wars mod apk with your friends<br />
|
60 |
-
Bed wars mod apk size and download time<br />
|
61 |
-
How to optimize bed wars mod apk performance and battery usage<br />
|
62 |
-
Bed wars mod apk sound effects and music settings<br />
|
63 |
-
How to customize bed wars mod apk controls and interface<br />
|
64 |
-
Bed wars mod apk languages and translations</p>
|
65 |
-
<h2>How to Download Bed Wars Mod APK on Android?</h2>
|
66 |
-
<p>Now that you know why you should download Bed Wars Mod APK, you might be wondering how to do it. Don't worry, it's very easy and simple. All you need is an Android device with at least 4 GB of RAM and 100 MB of free storage space. Then, follow these steps:</p>
|
67 |
-
<h3>Step 1: Allow Unknown Apps on Android</h3>
|
68 |
-
<p>The first thing you need to do is to allow unknown apps on your Android device. This means that you will be able to install apps that are not from the Google Play Store. To do this, go to your device settings and look for the security or privacy option. Then, find the unknown sources or install unknown apps option and enable it.</p>
|
69 |
-
<p>This will allow you to install Bed Wars Mod APK on your device without any problems. However, make sure that you only download apps from trusted sources and websites. Otherwise, you might end up installing malware or viruses on your device.</p>
|
70 |
-
<h3>Step 2: Install an Android File Manager</h3>
|
71 |
-
<p>The next thing you need to do is to install an Android file manager app on your device. This is an app that will help you find and manage the files on your device. You will need this app to locate and install the Bed Wars Mod APK file that you will download later.</p>
|
72 |
-
<p>There are many file manager apps that you can choose from, such as ES File Explorer, Astro File Manager, or Solid Explorer. You can download any of them from the Google Play Store for free. Once you have installed a file manager app on your device, open it and grant it the necessary permissions.</p>
|
73 |
-
<h3>Step 3: Download the APK Installer From Your Android</h3>
|
74 |
-
<p>The next step is to download the Bed Wars Mod APK file from your Android device. To do this, open your web browser and go to this link: . This is a reputable website where you can download the latest version of Bed Wars Mod APK for free.</p>
|
75 |
-
<p>Once you are on the website, scroll down until you see the download button. Tap on it and wait for the download to start. The file size is about 98 MB, so it might take a few minutes depending on your internet speed.</p>
|
76 |
-
<h3>Step 4: Transfer the APK Installer via USB (Optional)</h3>
|
77 |
-
<p>If you prefer, you can also download the Bed Wars Mod APK file from your computer and transfer it to your Android device via USB cable. This might be faster and more convenient for some users. To do this, follow these steps:</p>
|
78 |
-
<ul>
|
79 |
-
<li>Go to the same link as before on your computer and download the Bed Wars Mod APK file.</li>
|
80 |
-
<li>Connect your Android device to your computer using a USB cable.</li>
|
81 |
-
<li>On your computer, open the folder where you saved the APK file and copy it.</li>
|
82 |
-
<li>On your Android device, open the file manager app and navigate to the folder where you want to paste the APK file.</li>
|
83 |
-
<li>Paste the APK file and disconnect your device from your computer.</li>
|
84 |
-
</ul>
|
85 |
-
<h3>Step 5: Install the APK File on Your Device</h3>
|
86 |
-
<p>The final step is to install the Bed Wars Mod APK file on your device. To do this, follow these steps:</p>
|
87 |
-
<ul>
|
88 |
-
<li>Open the file manager app and locate the APK file that you downloaded or transferred.</li>
|
89 |
-
<li>Tap on the APK file and a pop-up window will appear asking you to confirm the installation.</li>
|
90 |
-
<li>Tap on install and wait for the installation to finish.</li>
|
91 |
-
<li>Once the installation is done, you will see a notification that says "App installed".</li>
|
92 |
-
<li>Tap on open and enjoy Bed Wars Mod APK!</li>
|
93 |
-
</ul>
|
94 |
-
<h2>How to Play Bed Wars Mod APK?</h2>
|
95 |
-
<p>Now that you have installed Bed Wars Mod APK on your device, you might be wondering how to play it. Don't worry, it's very easy and simple. All you need to do is follow these steps:</p>
|
96 |
-
<ul>
|
97 |
-
<li>Open Bed Wars Mod APK and tap on start.</li>
|
98 |
-
<li>Select the level that you want to play: Solo, Duo, Trio, or Squad.</li>
|
99 |
-
<li>Select the map that you want to play: Classic, Lucky Block, Rush, or Ultimate.</li>
|
100 |
-
<li>Select the mode that you want to play: Normal, Chaos, or Custom.</li>
|
101 |
-
<li>Wait for the game to load and join a team with a color.</li>
|
102 |
-
<li>Protect your bed from being destroyed by other teams and collect resources from the islands.</li>
|
103 |
-
<li>Buy items and upgrades from the shop and build bridges to other islands.</li>
|
104 |
-
<li>Attack other beds and eliminate other players.</li>
|
105 |
-
<li>The last team standing wins the game!</li>
|
106 |
-
</ul>
|
107 |
-
<h2>Conclusion</h2>
|
108 |
-
<p>Bed Wars is a fun and addictive game that you can play with your friends. However, it can also be frustrating and challenging if you don't have enough money and gcubes to buy the items and upgrades you need. That's why downloading Bed Wars Mod APK is a great idea. This is a modified version of the game that gives you unlimited money and gcubes, which are the in-game currencies. With these resources, you can buy anything you want in the game without worrying about running out. You can also unlock all the skins, items, maps, modes, and more. This way, you can enjoy Bed Wars to the fullest.</p>
|
109 |
-
<p>In this article, we showed you how to download and install Bed Wars Mod APK on your Android device in just a few simple steps. We also gave you some tips on how to play Bed Wars Mod APK and have a blast with your friends. We hope that you found this article helpful and informative. If you did, please share it with your friends who might also be interested in playing Bed Wars Mod APK. Thank you for reading!</p>
|
110 |
-
<h2>Frequently Asked Questions</h2>
|
111 |
-
<h3>Q: Is Bed Wars Mod APK safe to download?</h3>
|
112 |
-
<p>A: Yes, Bed Wars Mod APK is safe to download as long as you download it from a trusted source and website. However, make sure that you scan the APK file with an antivirus app before installing it on your device. This way, you can avoid any potential malware or viruses that might harm your device.</p>
|
113 |
-
<h3>Q: Do I need to root my device to use Bed Wars Mod APK?</h3>
|
114 |
-
<p>A: No, you don't need to root your device to use Bed Wars Mod APK. This modded game works fine on any Android device without requiring any root access or permissions. Just follow the steps above and enjoy Bed Wars Mod APK without any hassle.</p>
|
115 |
-
<h3>Q: Can I play Bed Wars Mod APK online with other players?</h3>
|
116 |
-
<p>A: Yes, you can play Bed Wars Mod APK online with other players who are also using the modded version of the game. However, you might not be able to play with players who are using the original version of the game from the Google Play Store. This is because the modded game has different features and settings that might not be compatible with the original game. Therefore, we recommend that you play Bed Wars Mod APK with your friends who are also using the same modded game.</p>
|
117 |
-
<h3>Q: How can I update Bed Wars Mod APK?</h3>
|
118 |
-
<p>A: To update Bed Wars Mod APK, you need to download the latest version of the APK file from the same website where you downloaded it before. Then, you need to uninstall the previous version of the game from your device and install the new version. This way, you can enjoy the latest features and improvements of Bed Wars Mod APK.</p>
|
119 |
-
<h3>Q: What are some tips and tricks for playing Bed Wars Mod APK?</h3>
|
120 |
-
<p>A: Here are some tips and tricks for playing Bed Wars Mod APK:</p>
|
121 |
-
<ul>
|
122 |
-
<li>Use your unlimited money and gcubes wisely. Don't spend them all on unnecessary items or upgrades. Save some for later stages of the game when you need them more.</li>
|
123 |
-
<li>Communicate and coordinate with your teammates. Use the chat feature or voice chat to plan your strategies and tactics. Share resources, items, and information with your teammates. Work together to protect your bed and attack other beds.</li>
|
124 |
-
<li>Be creative and innovative. Use different blocks, weapons, tools, and potions to build bridges, traps, defenses, and attacks. Experiment with different combinations and see what works best for you.</li>
|
125 |
-
<li>Be alert and aware. Keep an eye on your surroundings and watch out for enemies. Don't let your guard down or get distracted. Be ready to react and adapt to any situation.</li>
|
126 |
-
<li>Have fun and enjoy the game. Don't take it too seriously or get angry if you lose. Remember that it's just a game and the main purpose is to have fun with your friends.</li>
|
127 |
-
</ul>
|
128 |
-
<h2></h2>
|
129 |
-
<p>That's it! You have successfully downloaded and installed Bed Wars Mod APK on your Android device. You have also learned how to play Bed Wars Mod APK and some tips and tricks for playing it. We hope that you enjoyed this article and found it helpful and informative. If you did, please share it with your friends who might also be interested in playing Bed Wars Mod APK. Thank you for reading!</p> 197e85843d<br />
|
130 |
-
<br />
|
131 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cribbage King The Ultimate Cribbage Game for your iPhone..md
DELETED
@@ -1,118 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cribbage Game Free Download for iPhone: How to Play and Enjoy this Classic Card Game</h1>
|
3 |
-
<p>If you are looking for a fun and engaging card game that can challenge your mind and improve your skills, you should try cribbage. Cribbage is a classic card game that has been played for centuries by people of all ages and backgrounds. It is easy to learn, but hard to master, and it offers endless possibilities for strategy and variation. In this article, we will show you how to download and play cribbage games on your iPhone, as well as how to improve your skills and strategy in this fascinating game.</p>
|
4 |
-
<h2>What is Cribbage and Why Should You Play It?</h2>
|
5 |
-
<h3>The History and Rules of Cribbage</h3>
|
6 |
-
<p>Cribbage is a card game that originated in England in the 17th century. It was invented by Sir John Suckling, a poet and gambler who modified an older game called Noddy. The game is played with a standard 52-card deck and a special board with holes and pegs that are used to keep score. The objective of the game is to be the first player to reach 121 points by making combinations of cards that add up to 15, pairs, runs, flushes, or nobs (the jack of the same suit as the starter card).</p>
|
7 |
-
<h2>cribbage game free download for iphone</h2><br /><p><b><b>Download File</b> > <a href="https://urlin.us/2uSRUl">https://urlin.us/2uSRUl</a></b></p><br /><br />
|
8 |
-
<p>The game is played by two or three players, or by four players in two teams. Each player is dealt six cards (five cards in a three-player game) and must discard two cards face down to form the crib, which belongs to the dealer. The non-dealer cuts the deck and reveals the top card, which is called the starter or the cut. The players then take turns playing one card each, starting with the non-dealer, and announcing the running total of the cards' values. The cards are worth their face value, except for face cards which are worth 10, and aces which are worth 1. The player who plays a card that makes the total exactly 15 scores two points, called "fifteen two". The player who plays a card that makes the total 31 scores two points, called "thirty-one for two". If a player cannot play a card without going over 31, they say "go" and the other player continues until they reach 31 or cannot play either. The player who played the last card before a go or 31 scores one point, called "one for last".</p>
|
9 |
-
<p>After all the cards have been played, the players count their hands in turn, starting with the non-dealer. The hand consists of four cards plus the starter card. The players score points for any combinations of cards that add up to 15, pairs (two points), three of a kind (six points), four of a kind (twelve points), runs (one point per card), flushes (four points for all five cards of the same suit, or five points if the crib also matches), and nobs (one point for having the jack of the same suit as the starter). The dealer then counts their hand, followed by the crib. The crib can only score points for 15s, pairs, runs, flushes, and nobs.</p>
|
10 |
-
<p>The game continues until one player reaches 121 points or more. The player who reaches 121 points first wins the game. If both players reach 121 points in the age, lowball cribbage, and back up 10 cribbage. Each mode has its own rules and challenges that will test your skills and strategy.</li>
|
11 |
-
<li>It has a realistic and immersive gameplay experience. You can play with realistic cards, boards, and pegs that are designed with high-quality graphics and animations. You can also hear realistic sound effects and voice overs that add to the atmosphere of the game.</li>
|
12 |
-
<li>It has a smart and adaptive AI that can adjust to your skill level and style. You can play against different opponents with different personalities, strengths, and weaknesses. You can also customize the AI settings to make the game more easy or difficult.</li>
|
13 |
-
<li>It has a leaderboard and achievements feature that tracks your progress and performance. You can see your rank, score, wins, losses, skunks, double skunks, and more. You can also unlock various achievements and badges that reward your accomplishments.</li>
|
14 |
-
<li>It has a multiplayer feature that lets you play with other players online or offline. You can play with your friends or family via Bluetooth, Wi-Fi, or Game Center. You can also play with random players from around the world via Game Center or Facebook.</li>
|
15 |
-
</ul>
|
16 |
-
<p>Ultimate Cribbage is a paid app that you can download from the App Store for $2.99. It is compatible with iPhone, iPad, and iPod touch devices running iOS 9.0 or later.</p>
|
17 |
-
<p>Cribbage Classic app for iPad and iPhone<br />
|
18 |
-
Ultimate Cribbage: Classic card game with different modes<br />
|
19 |
-
Cribbage: The Best Card Game by FIOGONIA LIMITED<br />
|
20 |
-
How to play cribbage on your iPhone with friends<br />
|
21 |
-
Cribbage tips and tricks to improve your skills<br />
|
22 |
-
Best cribbage apps for iPhone in 2023<br />
|
23 |
-
Cribbage Club subscription for ad-free gameplay<br />
|
24 |
-
Cribbage Pegboard app to track your score<br />
|
25 |
-
Cribbage variants: Classic, Muggins, and Shotgun<br />
|
26 |
-
Cribbage rules and scoring explained<br />
|
27 |
-
Cribbage Classic settings and features<br />
|
28 |
-
Ultimate Cribbage: Classic reviews and ratings<br />
|
29 |
-
Cribbage: The Best Card Game privacy policy<br />
|
30 |
-
Cribbage online tournaments and leaderboards<br />
|
31 |
-
Cribbage strategy and tactics guide<br />
|
32 |
-
Cribbage Classic discard analyzer bonus feature<br />
|
33 |
-
Ultimate Cribbage: Classic daily challenge rewards<br />
|
34 |
-
Cribbage: The Best Card Game support and feedback<br />
|
35 |
-
Cribbage history and origin<br />
|
36 |
-
Cribbage board designs and customizations<br />
|
37 |
-
Cribbage Classic update and bug fixes<br />
|
38 |
-
Ultimate Cribbage: Classic in-app purchases and prices<br />
|
39 |
-
Cribbage: The Best Card Game download size and compatibility<br />
|
40 |
-
Cribbage offline mode and solo play<br />
|
41 |
-
Cribbage glossary and terminology<br />
|
42 |
-
Cribbage Classic statistics and performance tracking<br />
|
43 |
-
Ultimate Cribbage: Classic app for Mac with Apple M1 chip or later<br />
|
44 |
-
Cribbage: The Best Card Game screenshots and videos<br />
|
45 |
-
Cribbage etiquette and manners<br />
|
46 |
-
Cribbage fun facts and trivia.</p>
|
47 |
-
<h4>How to Download and Play Ultimate Cribbage</h4>
|
48 |
-
<p>To download and play Ultimate Cribbage on your iPhone, follow these simple steps:</p>
|
49 |
-
<ol>
|
50 |
-
<li>Open the App Store on your iPhone and search for "Ultimate Cribbage".</li>
|
51 |
-
<li>Tap on the app icon and then tap on "Buy" to purchase and install the app on your device.</li>
|
52 |
-
<li>Once the app is installed, tap on "Open" to launch the app.</li>
|
53 |
-
<li>Select the game mode you want to play: classic cribbage, crash cribbage, cross cribbage, lowball cribbage, or back up 10 cribbage.</li>
|
54 |
-
<li>Select the difficulty level you want to play: easy, medium, hard, or custom.</li>
|
55 |
-
<li>Tap on "Play" to start the game.</li>
|
56 |
-
<li>Enjoy playing cribbage with Ultimate Cribbage!</li>
|
57 |
-
</ol>
|
58 |
-
<h2>How to Improve Your Skills and Strategy in Cribbage</h2>
|
59 |
-
<h3>Tips and Tricks for Discarding and Pegging</h3>
|
60 |
-
<p>One of the most important aspects of cribbage is discarding and pegging. Discarding is the process of choosing which two cards to put in the crib at the beginning of each deal. Pegging is the process of playing cards during the play phase of each deal. Here are some tips and tricks for discarding and pegging:</p>
|
61 |
-
<ul>
|
62 |
-
<li>When discarding, you should consider both your hand and the crib. You should try to keep cards that can score points in your hand, such as 15s, pairs, runs, flushes, or nobs. You should also try to avoid giving cards that can score points in the crib to your opponent, especially if they are the dealer.</li>
|
63 |
-
<li>When discarding, you should also consider how likely your discards are to combine with other cards for points. For example, a 5 is especially dangerous to give away, because 16 of the 52 cards (the 10s, jacks, queens, and kings) have a value of 10 and will make a 15 with it. Therefore, you should avoid discarding a 5 to your opponent's crib whenever you reasonably can.</li>
|
64 |
-
<li>When pegging, you should try to score points by making 15s or 31s, or by making pairs, runs, or flushes. You should also try to prevent your opponent from scoring points by blocking their combinations or forcing them to go over 31.</li>
|
65 |
-
<li>When pegging, you should also try to anticipate what cards your opponent has in their hand based on what they have played so far. For example, if they have played a 6 and a 7, they might have an 8 or a 9 in their hand to make a run. Therefore, you should avoid playing an 8 or a 9 yourself.</li>
|
66 |
-
</ul>
|
67 |
-
<h3>How to Use the Discard Analyzer Feature in Cribbage Apps</h3>
|
68 |
-
<p>If you want to improve your discarding skills in cribbage , you can use the discard analyzer feature in some cribbage apps. The discard analyzer feature is a tool that can help you decide which cards to discard to the crib based on the expected value of each possible combination. The expected value is the average number of points that you can expect to score from your hand and the crib after the starter card is revealed. The higher the expected value, the better the combination. To use the discard analyzer feature in cribbage apps, follow these simple steps: <ol>
|
69 |
-
<li>After you are dealt your cards, tap on the discard analyzer button on the screen. This will open a window that shows you all the possible combinations of cards that you can discard to the crib, along with their expected values.</li>
|
70 |
-
<li>Compare the expected values of each combination and choose the one that has the highest expected value. This means that this combination will give you the best chance of scoring more points from your hand and the crib.</li>
|
71 |
-
<li>Tap on the cards that you want to discard to the crib and confirm your choice. The app will then show you your remaining hand and the crib.</li>
|
72 |
-
<li>Wait for the starter card to be revealed and continue playing as usual.</li>
|
73 |
-
</ol>
|
74 |
-
<p>The discard analyzer feature is a useful tool that can help you improve your discarding skills in cribbage, but it is not a substitute for your own judgment and intuition. You should also consider other factors, such as your opponent's skill level, your position on the board, and your personal preference, when deciding which cards to discard to the crib.</p>
|
75 |
-
<h3>How to Practice and Learn from Other Players Online</h3>
|
76 |
-
<p>Another way to improve your skills and strategy in cribbage is to practice and learn from other players online. Playing online can expose you to different styles and strategies of cribbage, as well as give you feedback and tips on how to play better. Here are some ways to practice and learn from other players online:</p>
|
77 |
-
<ul>
|
78 |
-
<li>Play against different opponents with different skill levels. You can play against easy, medium, or hard AI opponents, or against real players from around the world. You can also play against your friends or family online. Playing against different opponents can help you adapt to different situations and challenges in cribbage.</li>
|
79 |
-
<li>Watch replays of your games or other players' games. You can watch replays of your own games or other players' games on some cribbage apps. You can see how they played their cards, how they scored their points, and how they won or lost the game. You can also pause, rewind, or fast-forward the replays to analyze them in detail.</li>
|
80 |
-
<li>Join online communities and forums for cribbage players. You can join online communities and forums for cribbage players on social media platforms, such as Facebook, Twitter, or Reddit. You can interact with other cribbage players, ask questions, share tips, discuss strategies, or challenge each other to games.</li>
|
81 |
-
</ul>
|
82 |
-
<p>Practicing and learning from other players online can help you improve your skills and strategy in cribbage, but it is not a substitute for your own experience and practice. You should also play offline with real cards and boards, as well as read books and articles about cribbage.</p>
|
83 |
-
<h2>Conclusion</h2>
|
84 |
-
<p>Cribbage is a classic card game that can provide you with hours of fun and entertainment, as well as improve your mental skills and abilities. It is a game that you can play anytime, anywhere, and with anyone. In this article, we have shown you how to download and play cribbage games on your iPhone, as well as how to improve your skills and strategy in this fascinating game. We hope that you have enjoyed reading this article and that you have learned something new about cribbage. Now go ahead and download one of the cribbage apps we have recommended and start playing this amazing game!</p>
|
85 |
-
<h2>Frequently Asked Questions</h2>
|
86 |
-
<p>Here are some frequently asked questions about cribbage:</p>
|
87 |
-
<ol>
|
88 |
-
<li>What is the best hand in cribbage?</li>
|
89 |
-
<p>The best hand in cribbage is 29 points, which consists of a jack and three fives in your hand, with the starter card being the fourth five — the one of the same suit as your jack. This hand scores 16 points for eight 15s (the jack combines with each of the four fives, and the four fives form four different three-five combinations), 12 points for six pairs of fives, and one point for nobs (the jack of the same suit as the starter card). This hand is extremely rare, since it requires being dealt the jack and three fives and then cutting the matching fourth five as the starter.</p>
|
90 |
-
<li>What is a muggins in cribbage?</li>
|
91 |
-
<p <p>A muggins in cribbage is a rule that allows a player to claim any points that their opponent has missed or miscalculated during the scoring phase of each deal. For example, if a player counts their hand as 10 points, but their opponent notices that they actually have 12 points, the opponent can say "muggins" and take the extra two points for themselves. The muggins rule is optional and can be agreed upon or declined by the players before the game starts.</p>
|
92 |
-
<li>What is the difference between a skunk and a double skunk in cribbage?</li>
|
93 |
-
<p>A skunk in cribbage is when a player wins the game by 31 or more points over their opponent. A double skunk in cribbage is when a player wins the game by 61 or more points over their opponent. A skunk and a double skunk are considered to be humiliating defeats for the losing player, and they usually result in extra penalties or rewards for the winning player. For example, some players may agree to double or quadruple the stakes of the game if a skunk or a double skunk occurs.</p>
|
94 |
-
<li>How many cards are in a cribbage deck?</li>
|
95 |
-
<p>A cribbage deck consists of a standard 52-card deck, which includes 13 ranks (ace, 2, 3, 4, 5, 6, 7, 8, 9, 10, jack, queen, and king), with one card of each rank in each of the four suits (clubs, diamonds, hearts, and spades). However, some variations of cribbage may use different decks, such as a 48-card deck (without the 10s) or a 32-card deck (without the 2s, 3s, 4s, 5s, and 6s).</p>
|
96 |
-
<li>How do you shuffle and cut the cards in cribbage?</li>
|
97 |
-
<p>To shuffle and cut the cards in cribbage, follow these simple steps:</p>
|
98 |
-
<ol>
|
99 |
-
<li>The dealer shuffles the cards thoroughly and offers them to the non-dealer to cut. The non-dealer cuts the cards by taking a portion of the cards from the top of the deck and placing them on the bottom.</li>
|
100 |
-
<li>The dealer then takes the top card from the bottom portion of the deck and places it face up on top of the deck. This card is called the starter or the cut.</li>
|
101 |
-
<li>The dealer then deals six cards to each player (five cards in a three-player game), one at a time, starting with the non-dealer.</li>
|
102 |
-
</ol>
|
103 |
-
<li>What are some common terms and phrases used in cribbage?</li>
|
104 |
-
<p>Some common terms and phrases used in cribbage are:</p>
|
105 |
-
<ul>
|
106 |
-
<li>Fifteen: A combination of cards that adds up to 15 points.</li>
|
107 |
-
<li>Pair: Two cards of the same rank.</li>
|
108 |
-
<li>Run: Three or more consecutive cards of any suit.</li>
|
109 |
-
<li>Flush: Four or five cards of the same suit.</li>
|
110 |
-
<li>Nobs: The jack of the same suit as the starter card.</li>
|
111 |
-
<li>His heels: When the card turned up as the starter is a jack, the dealer immediately scores two points.</li>
|
112 |
-
<li>Crib: The two cards discarded by each player at the beginning of each deal.</li>
|
113 |
-
<li>Peg: A small marker used to keep score on the board.</li>
|
114 |
-
<li>Hole: A small hole on the board where pegs are inserted.</li>
|
115 |
-
<li>Street: A section of five holes on the board.</li>
|
116 |
-
</ul></p> 197e85843d<br />
|
117 |
-
<br />
|
118 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 3D Chess Game for PC and Play in Stunning Scenes and Graphics.md
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download 3D Chess Game for PC: A Guide to the Best Options</h1>
|
3 |
-
<p>If you are a fan of chess and want to enjoy a more immersive and realistic experience, you might want to try playing 3D chess on your PC. 3D chess is a variation of the classic board game that uses three-dimensional graphics and animations to simulate a real chessboard. You can play against the computer, online opponents, or even friends in local multiplayer mode. In this article, we will show you how to download and install 3D chess game for PC, and review some of the best options available on the market.</p>
|
4 |
-
<h2>Introduction</h2>
|
5 |
-
<h3>What is 3D chess and why play it on PC?</h3>
|
6 |
-
<p>3D chess is a type of chess game that uses three-dimensional models and effects to create a more realistic and engaging gameplay. Unlike traditional chess games that use flat images or icons, 3D chess games allow you to see the pieces from different angles, zoom in and out, rotate the board, and enjoy various visual effects. Some 3D chess games also have different themes, backgrounds, sounds, and music to enhance the atmosphere.</p>
|
7 |
-
<h2>download 3d chess game for pc</h2><br /><p><b><b>Download</b> ->>->>->> <a href="https://urlin.us/2uSZuw">https://urlin.us/2uSZuw</a></b></p><br /><br />
|
8 |
-
<p>Playing 3D chess on PC has several advantages over playing it on other devices. First of all, you can enjoy better graphics quality and performance on a larger screen. Second, you can use your mouse and keyboard to control the game more easily and precisely. Third, you can access a wider range of options and features, such as online multiplayer, puzzles, rankings, achievements, etc. Fourth, you can save money by downloading free or cheap games from online platforms.</p>
|
9 |
-
<h2>How to download and install 3D chess game for PC</h2>
|
10 |
-
<p>There are many ways to download and install 3D chess game for PC, but we will focus on three of the most popular and reliable ones: Chess! on Steam, 3D Chess Game on Microsoft Store, and 3D Chess on Steam. We will compare their features, pros and cons, and how to get them in the following sections.</p>
|
11 |
-
<h3>Option 1: Chess! on Steam</h3>
|
12 |
-
<h4>Features, pros and cons, and how to get it</h4>
|
13 |
-
<p>Chess! is an upcoming 3D chess game that is expected to be released in Q2 2023. It is developed by Exeter Game Studios and published by familyplay. It is built with Unreal Engine 5 and integrated with Lichess, one of the largest online chess platforms in the world. Here are some of its features:</p>
|
14 |
-
<ul>
|
15 |
-
<li>Next-generation graphics: Witness the power of Unreal Engine 5 as it transforms the classic game of chess into a breathtaking visual spectacle. Enjoy lifelike textures, realistic lighting, and multiple scenes with intricate details that elevate your gaming experience to new heights.</li>
|
16 |
-
<li>Lichess integration: Seamlessly connect to the Lichess platform, allowing you to engage in exciting games with advanced AI, track your ELO, practice with puzzles, and participate in ranked Lichess matchmaking for a truly competitive experience.</li>
|
17 |
-
<li>Advanced AI and human opponents: Test your skills against sophisticated AI opponents or challenge real players from around the world, providing endless opportunities to refine your strategies and grow as a chess master.</li>
|
18 |
-
<li>Millions of offline puzzles: Dive into a vast library of puzzles that cater to all skill levels, from beginner to grandmaster. Sharpen your tactical acumen and learn from the best with our expertly curated collection of chess challenges.</li>
|
19 |
-
<li>Relaxing scenes <p>Relaxing scenes and sounds: Choose from a variety of relaxing scenes and sounds to create the perfect ambiance for your chess game. Whether you prefer a cozy fireplace, a tranquil garden, or a futuristic cityscape, you can find the ideal setting for your mood and style.</li>
|
20 |
-
</ul>
|
21 |
-
<p>The pros of Chess! are:</p>
|
22 |
-
<ul>
|
23 |
-
<li>It offers stunning graphics and animations that make the game more immersive and realistic.</li>
|
24 |
-
<li>It connects to Lichess, which is a reputable and popular online chess platform with millions of users and features.</li>
|
25 |
-
<li>It has a wide range of difficulty levels, puzzles, and modes to suit different preferences and goals.</li>
|
26 |
-
<li>It has relaxing scenes and sounds that create a soothing atmosphere for the game.</li>
|
27 |
-
</ul>
|
28 |
-
<p>The cons of Chess! are:</p>
|
29 |
-
<ul>
|
30 |
-
<li>It is not yet released, so it might have some bugs or glitches when it launches.</li>
|
31 |
-
<li>It might require a high-end PC to run smoothly and enjoy the full graphics quality.</li>
|
32 |
-
<li>It might not be compatible with some older operating systems or devices.</li>
|
33 |
-
</ul>
|
34 |
-
<p>To get Chess!, you need to have a Steam account and a PC that meets the minimum system requirements. You can pre-order the game on Steam for $9.99 and get access to it as soon as it is released. You can also follow the game's development updates on its official website or social media accounts.</p>
|
35 |
-
<p>How to install 3D Chess Game on Windows PC or Mac[^1^]<br />
|
36 |
-
Chess! an immersive 3D chess game with Lichess integration[^2^]<br />
|
37 |
-
Get 3D Chess Game from Microsoft Store for free[^3^]<br />
|
38 |
-
3D Chess a unique chess trip with instant duels on Steam[^4^]<br />
|
39 |
-
Best 3D chess games for PC in 2023<br />
|
40 |
-
Download 3D Chess Game APK for Android devices<br />
|
41 |
-
Play 3D Chess online with friends or strangers<br />
|
42 |
-
Learn chess with 3D Chess Game puzzles and challenges<br />
|
43 |
-
Compare 3D Chess Game with other chess apps and software<br />
|
44 |
-
3D Chess Game reviews and ratings from users and experts<br />
|
45 |
-
How to uninstall 3D Chess Game from your PC or Mac<br />
|
46 |
-
3D Chess Game tips and tricks to improve your skills<br />
|
47 |
-
How to customize your board and pieces in 3D Chess Game<br />
|
48 |
-
How to play 3D Chess Game offline or without internet connection<br />
|
49 |
-
How to solve common issues and errors in 3D Chess Game<br />
|
50 |
-
How to update 3D Chess Game to the latest version<br />
|
51 |
-
How to use the free flying camera in 3D Chess Game<br />
|
52 |
-
How to play against advanced AI in 3D Chess Game<br />
|
53 |
-
How to participate in ranked matchmaking in 3D Chess Game<br />
|
54 |
-
How to track your online ELO in 3D Chess Game<br />
|
55 |
-
How to sign up and login to Lichess account in 3D Chess Game<br />
|
56 |
-
How to donate to Lichess charity organization in 3D Chess Game<br />
|
57 |
-
How to enjoy lifelike textures and realistic lighting in 3D Chess Game<br />
|
58 |
-
How to switch between different scenes and locations in 3D Chess Game<br />
|
59 |
-
How to play 3D Chess Game on HoloLens or other VR devices<br />
|
60 |
-
How to download and play 3D Chess Game on Linux or Ubuntu<br />
|
61 |
-
How to stream or record your gameplay of 3D Chess Game<br />
|
62 |
-
How to join or create a chess club in 3D Chess Game<br />
|
63 |
-
How to chat or communicate with other players in 3D Chess Game<br />
|
64 |
-
How to report or block abusive or cheating players in 3D Chess Game<br />
|
65 |
-
How to access the vast library of offline puzzles in 3D Chess Game<br />
|
66 |
-
How to share your achievements and scores of 3D Chess Game on social media<br />
|
67 |
-
How to find and play with your friends in 3D Chess Game<br />
|
68 |
-
How to change the language or sound settings in 3D Chess Game<br />
|
69 |
-
How to enable or disable the relaxing music in 3D Chess Game<br />
|
70 |
-
How to use wildcard characters or anagrams in 3D Chess Game<br />
|
71 |
-
How to checkmate your opponent with only a few moves in 3D Chess Game<br />
|
72 |
-
How to learn from the best with expertly curated chess challenges in 3D Chess Game<br />
|
73 |
-
How to play different variants of chess such as blitz, bullet, rapid, etc. in 3D Chess Game<br />
|
74 |
-
How to watch live games or tournaments of professional chess players in 3D Chess Game<br />
|
75 |
-
How to use the dictionary feature in 3D Chess Game for definitions and synonyms of chess terms<br />
|
76 |
-
How to play the piano or other musical instruments in 3D Chess Game for fun or relaxation<br />
|
77 |
-
How to use the Phoenix Force feature in 3D Chess Game for a fiery and explosive gameplay<br />
|
78 |
-
How to climb up and overcome increasing challenges in Upward mode of 3D Chess Game <br />
|
79 |
-
How to use the speech function in 3D Chess Game for correct pronunciation of chess moves and names <br />
|
80 |
-
How to see your word history or make your own list of favorite words in Dictionary mode of 3D Chess Game <br />
|
81 |
-
How to get the word of the day with interesting and entertaining words in Dictionary mode of 3D Chess Game <br />
|
82 |
-
How to use the solar physics feature in 3D Chess Game for learning about the Sun and its layers <br />
|
83 |
-
How to witness the power of Unreal Engine 5 in transforming the classic game of chess into a breathtaking visual spectacle</p>
|
84 |
-
<h3>Option 2: 3D Chess Game on Microsoft Store</h3>
|
85 |
-
<h4>Features, pros and cons, and how to get it</h4>
|
86 |
-
<p>3D Chess Game is a free 3D chess game that is available on Microsoft Store. It is developed by A Trillion Games Ltd and has over 10 million downloads. It is designed for Windows 10 devices, including PCs, tablets, and phones. Here are some of its features:</p>
|
87 |
-
<ul>
|
88 |
-
<li>Simple and intuitive interface: Enjoy a user-friendly interface that lets you play the game with ease. You can adjust the board size, view angle, sound effects, and difficulty level with just a few clicks.</li>
|
89 |
-
<li>Multiple game modes: Choose from four different game modes to suit your preference. You can play against the computer, online opponents, local friends, or watch the computer play against itself.</li>
|
90 |
-
<li>Customizable board and pieces: Customize the appearance of the board and pieces by selecting from various colors, styles, and themes. You can also change the background image to match your mood.</li>
|
91 |
-
<li>Statistics and achievements: Track your progress and performance with statistics and achievements. You can see your win rate, ELO rating, moves history, best moves, etc. You can also unlock achievements by completing certain tasks or challenges.</li>
|
92 |
-
</ul>
|
93 |
-
<p>The pros of 3D Chess Game are:</p>
|
94 |
-
<ul>
|
95 |
-
<li>It is free to download and play, with no ads or in-app purchases.</li>
|
96 |
-
<li>It is compatible with Windows 10 devices, including PCs, tablets, and phones.</li>
|
97 |
-
<li>It has a simple and intuitive interface that makes the game easy to play.</li>
|
98 |
-
<li>It has multiple game modes and customizable options to suit different tastes and needs.</li>
|
99 |
-
</ul>
|
100 |
-
<p>The cons of 3D Chess Game are:</p>
|
101 |
-
<ul>
|
102 |
-
<li>It has mediocre graphics and animations that might not appeal to some players.</li>
|
103 |
-
<li>It has limited online features and options compared to other platforms.</li>
|
104 |
-
<li>It has occasional bugs or errors that might affect the gameplay.</li>
|
105 |
-
</ul>
|
106 |
-
<p>To get 3D Chess Game, you need to have a Microsoft account and a Windows 10 device that meets the minimum system requirements. You can download the game from Microsoft Store for free and start playing it right away. You can also rate and review the game on the store page or contact the developer for feedback or support.</p>
|
107 |
-
<h3>Option 3: 3D Chess on Steam</h3>
|
108 |
-
<h4>Features, pros and cons, and how to get it</h4>
|
109 |
-
<p>3D Chess is another 3D chess game that is available on Steam. It is developed by Bumblebee Games Studio Ltd. It was released in 2016 and has over 1000 reviews. It is designed for Windows PCs only. Here are some of its features:</p>
|
110 |
-
<ul>
|
111 |
-
<li>Detailed graphics: Experience a high-quality 3D chess game with detailed graphics and realistic shadows. You can choose from three different boards (wooden, marble, or glass) and four different sets of pieces (classic, metal, wood, or glass).</li>
|
112 |
-
<li>Cinematic camera: Enjoy a cinematic camera that follows the action on the board. You can <p>Cinematic camera: Enjoy a cinematic camera that follows the action on the board. You can also adjust the camera angle, zoom, and rotation to get the best view of the game.</li>
|
113 |
-
<li>Single-player and multiplayer modes: Play against the computer with three difficulty levels, or challenge your friends in local or online multiplayer mode. You can also play against random opponents from around the world, or join a tournament and compete for prizes.</li>
|
114 |
-
<li>Steam features: Take advantage of Steam features such as cloud saving, achievements, leaderboards, trading cards, etc. You can also share your screenshots and videos with the Steam community, or use the Steam Workshop to download and upload custom boards and pieces.</li>
|
115 |
-
</ul>
|
116 |
-
<p>The pros of 3D Chess are:</p>
|
117 |
-
<ul>
|
118 |
-
<li>It has detailed graphics and realistic shadows that create a more immersive gameplay.</li>
|
119 |
-
<li>It has a cinematic camera that adds to the visual appeal of the game.</li>
|
120 |
-
<li>It has single-player and multiplayer modes that offer different challenges and fun.</li>
|
121 |
-
<li>It has Steam features that enhance the game's functionality and community.</li>
|
122 |
-
</ul>
|
123 |
-
<p>The cons of 3D Chess are:</p>
|
124 |
-
<ul>
|
125 |
-
<li>It is not free to play, and costs $4.99 on Steam.</li>
|
126 |
-
<li>It is only compatible with Windows PCs, and not with other devices or operating systems.</li>
|
127 |
-
<li>It has some negative reviews that complain about the game's bugs, glitches, or lack of updates.</li>
|
128 |
-
</ul>
|
129 |
-
<p>To get 3D Chess, you need to have a Steam account and a Windows PC that meets the minimum system requirements. You can buy the game on Steam for $4.99 and download it to your PC. You can also check out the game's trailer, screenshots, and reviews on its Steam page or official website.</p>
|
130 |
-
<h2>Comparison table of the three options</h2>
|
131 |
-
<p>To help you decide which 3D chess game for PC is best for you, we have created a comparison table that summarizes the main features, pros and cons, and prices of the three options we have reviewed. You can see the table below:</p>
|
132 |
-
| Feature | Chess! | 3D Chess Game | 3D Chess | | --- | --- | --- | --- | | Graphics quality | Excellent | Mediocre | Good | | Online platform | Lichess | None | Steam | | Game modes | AI, online, puzzles | AI, online, local | AI, online, local | | Customization options | Scenes, sounds | Board, pieces, background | Board, pieces | | Statistics and achievements | Yes | Yes | Yes | | Price | $9.99 (pre-order) | Free | $4.99 | | Pros | Stunning graphics; Lichess integration; wide range of difficulty levels and puzzles; relaxing scenes and sounds | Free; compatible with Windows 10 devices; simple and intuitive interface; multiple game modes and customizable options | Detailed graphics and realistic shadows; cinematic camera; single-player and multiplayer modes; Steam features | | Cons | Not yet released; might require high-end PC; might not be compatible with older systems or devices | Mediocre graphics; limited online features; occasional bugs or errors | Not free; only compatible with Windows PCs; some negative reviews | <h2>Conclusion</h2>
|
133 |
-
<h3>Summary of the main points</h3>
|
134 |
-
<p>In this article, we have shown you how to download and install 3D chess game for PC, and reviewed some of the best options available on the market. We have compared their features, pros and cons, and prices in a comparison table. We have also explained what 3D chess is and why playing it on PC has several advantages over playing it on other devices.</p>
|
135 |
-
<h3>Recommendation and call to action</h3>
|
136 |
-
<p>Based on our analysis, we recommend Chess! as the best option for downloading 3D chess game for PC. It offers stunning graphics, Lichess integration, wide range of difficulty levels and puzzles, relaxing scenes and sounds, and more. It is also reasonably priced at $9.99 for pre-ordering. However, if you are looking for a free or simpler option, you can also try 3D Chess Game or 3D Chess on Microsoft Store or Steam respectively.</p>
|
137 |
-
<p>If you are interested in playing 3D chess on your PC, you can follow the links below to get your preferred option: - [Chess! on Steam] - [3D Chess Game on Microsoft Store] - [3D Chess on Steam] We hope you enjoyed this article and found it helpful. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
|
138 |
-
<h2>FAQs</h2>
|
139 |
-
<p>Here are some frequently asked questions about downloading 3D chess game <p>Here are some frequently asked questions about downloading 3D chess game for PC:</p>
|
140 |
-
<ol>
|
141 |
-
<li>What are the benefits of playing 3D chess on PC?</li>
|
142 |
-
<p>Playing 3D chess on PC has several benefits, such as better graphics quality and performance, easier and more precise control, wider range of options and features, and saving money by downloading free or cheap games.</p>
|
143 |
-
<li>What are the main differences between 3D chess and traditional chess?</li>
|
144 |
-
<p>3D chess is a variation of the classic board game that uses three-dimensional graphics and animations to simulate a real chessboard. It allows you to see the pieces from different angles, zoom in and out, rotate the board, and enjoy various visual effects. Some 3D chess games also have different themes, backgrounds, sounds, and music to enhance the atmosphere.</p>
|
145 |
-
<li>What are the minimum system requirements for playing 3D chess on PC?</li>
|
146 |
-
<p>The minimum system requirements for playing 3D chess on PC vary depending on the game you choose. However, a general guideline is that you need a Windows PC with at least 4 GB of RAM, 2 GB of disk space, a dual-core processor, and a graphics card that supports DirectX 11 or higher.</p>
|
147 |
-
<li>How can I improve my skills in 3D chess?</li>
|
148 |
-
<p>You can improve your skills in 3D chess by practicing regularly, playing against different opponents, solving puzzles, learning from tutorials or guides, watching videos or streams of other players, and joining online communities or forums.</p>
|
149 |
-
<li>Where can I find more information or support about 3D chess games?</li>
|
150 |
-
<p>You can find more information or support about 3D chess games by visiting their official websites or social media accounts, reading their reviews or ratings on online platforms, contacting their developers or publishers, or asking other players or experts.</p>
|
151 |
-
</ol></p> 197e85843d<br />
|
152 |
-
<br />
|
153 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/9xbuddy Music Download A Review of the Features Benefits and Limitations.md
DELETED
@@ -1,112 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<br>
|
3 |
-
<code>
|
4 |
-
<table>
|
5 |
-
<tr>
|
6 |
-
<td>
|
7 |
-
<h1>How to Download Music from YouTube with 9xbuddy</h1>
|
8 |
-
<p>Do you love listening to music on YouTube but wish you could save it offline? Do you want to enjoy your favorite songs without ads or interruptions? Do you want to convert YouTube videos into MP3 files easily and quickly?</p>
|
9 |
-
<p>If you answered yes to any of these questions, then you need to try <strong>9xbuddy</strong>. It is a powerful online tool that lets you download music from YouTube in a matter of seconds. In this article, we will show you how to use 9xbuddy to download music from YouTube, as well as some tips and tricks for making the most of it. We will also compare it with some alternatives that you can try if you want more options.</p>
|
10 |
-
<h2>9xbuddy music download</h2><br /><p><b><b>DOWNLOAD</b> ↔ <a href="https://jinyurl.com/2uNNUt">https://jinyurl.com/2uNNUt</a></b></p><br /><br /> <h2>What is 9xbuddy?</h2>
|
11 |
-
<p><strong>9xbuddy</strong> is a free online service that allows you to download any video or audio from any website, including YouTube, Facebook, Instagram, Twitter, Vimeo, Dailymotion, SoundCloud, and more. You can use it to download music from YouTube in MP3 format, as well as other formats like MP4, WEBM, M4A, and more. You can also choose the quality of the download, from low to high. 9xbuddy is fast, easy, and reliable. You don't need to install any software or register an account. You just need to copy and paste the URL of the video or audio you want to download and click on the download button. 9xbuddy will do the rest for you.</p>
|
12 |
-
<h2>Why Use 9xbuddy to Download Music from YouTube?</h2>
|
13 |
-
<p>There are many reasons why you might want to use 9xbuddy to download music from YouTube. Here are some of them:</p>
|
14 |
-
<ul>
|
15 |
-
<li>You can save your favorite songs offline and listen to them anytime, anywhere, without internet connection or data charges.</li>
|
16 |
-
<li>You can avoid annoying ads or interruptions that might ruin your listening experience.</li>
|
17 |
-
<li>You can create your own playlists and mixtapes with the songs you download.</li>
|
18 |
-
<li>You can transfer the songs to other devices or platforms, such as your phone, tablet, computer, MP3 player, car stereo, etc.</li>
|
19 |
-
<li>You can edit or remix the songs with other tools or software.</li>
|
20 |
-
<li>You can share the songs with your friends or family via email, social media, Bluetooth, etc.</li>
|
21 |
-
</ul>
|
22 |
-
<p>As you can see, using 9xbuddy to download music from YouTube can give you a lot of benefits and convenience. It can also save you time and money. So why not give it a try?</p>
|
23 |
-
<h2>How to Use 9xbuddy to Download Music from YouTube?</h2>
|
24 |
-
<p>Using 9xbuddy to download music from YouTube is very simple and straightforward. You just need to follow these four steps:</p>
|
25 |
-
<p>9xbuddy online video downloader<br />
|
26 |
-
9xbuddy mp3 converter<br />
|
27 |
-
9xbuddy youtube to mp3<br />
|
28 |
-
9xbuddy soundcloud downloader<br />
|
29 |
-
9xbuddy alternative sites<br />
|
30 |
-
9xbuddy facebook video download<br />
|
31 |
-
9xbuddy twitter video download<br />
|
32 |
-
9xbuddy dailymotion video download<br />
|
33 |
-
9xbuddy instagram video download<br />
|
34 |
-
9xbuddy tiktok video download<br />
|
35 |
-
9xbuddy vimeo video download<br />
|
36 |
-
9xbuddy spotify music download<br />
|
37 |
-
9xbuddy apple music download<br />
|
38 |
-
9xbuddy amazon music download<br />
|
39 |
-
9xbuddy deezer music download<br />
|
40 |
-
9xbuddy tidal music download<br />
|
41 |
-
9xbuddy pandora music download<br />
|
42 |
-
9xbuddy audiomack music download<br />
|
43 |
-
9xbuddy bandcamp music download<br />
|
44 |
-
9xbuddy soundclick music download<br />
|
45 |
-
9xbuddy mixcloud music download<br />
|
46 |
-
9xbuddy reverbnation music download<br />
|
47 |
-
9xbuddy datpiff music download<br />
|
48 |
-
9xbuddy jamendo music download<br />
|
49 |
-
9xbuddy beatport music download<br />
|
50 |
-
9xbuddy jiosaavn music download<br />
|
51 |
-
9xbuddy gaana music download<br />
|
52 |
-
9xbuddy hungama music download<br />
|
53 |
-
9xbuddy wynk music download<br />
|
54 |
-
9xbuddy shazam music download<br />
|
55 |
-
9xbuddy musixmatch music download<br />
|
56 |
-
9xbuddy tunein music download<br />
|
57 |
-
9xbuddy iheartradio music download<br />
|
58 |
-
9xbuddy last.fm music download<br />
|
59 |
-
9xbuddy napster music download<br />
|
60 |
-
9xbuddy yandex.music download<br />
|
61 |
-
9xbuddy qqmusic download<br />
|
62 |
-
9xbuddy netease cloud music download<br />
|
63 |
-
9xbuddy xiami music download<br />
|
64 |
-
9xbuddy kuwo music download<br />
|
65 |
-
9xbuddy kugou music download<br />
|
66 |
-
9xbuddy migu music download<br />
|
67 |
-
9xbuddy melon music download<br />
|
68 |
-
9xbuddy bugs music download<br />
|
69 |
-
9xbuddy genie music download<br />
|
70 |
-
9xbuddy flo music download<br />
|
71 |
-
9xbuddy vibe music download</p>
|
72 |
-
<h3>Step 1: Copy the YouTube Video URL</h3>
|
73 |
-
<p>The first thing you need to do is to find the YouTube video that contains the music you want to download. You can use the YouTube app or website to search for it. Once you find it, you need to copy its URL. The URL is the web address that appears in the address bar of your browser or app. For example, the URL of this video is https://www.youtube.com/watch?v=kJQP7kiw5Fk. To copy it, you can either right-click on it and select "Copy" or highlight it and press Ctrl+C on your keyboard (or Command+C on Mac).</p>
|
74 |
-
<h3>Step 2: Paste the URL into 9xbuddy</h3>
|
75 |
-
<p>The next thing you need to do is to go to the 9xbuddy website: https://9xbuddy.org/. You will see a search box where you can paste the URL of the YouTube video. To paste it, you can either right-click on the box and select "Paste" or click on the box and press Ctrl+V on your keyboard (or Command+V on Mac). Then, click on the "Download" button next to the box.</p>
|
76 |
-
<h3>Step 3: Choose the MP3 Format and Quality</h3>
|
77 |
-
<p>After clicking on the "Download" button, 9xbuddy will analyze the URL and show you a list of available formats and qualities for downloading. You will see options like MP4 (video), WEBM (video), M4A (audio), MP3 (audio), etc. To download music from YouTube, you need to choose the MP3 format. You can also choose the quality of the MP3 file, from low (64 kbps) to high (320 kbps). The higher the quality, the larger the file size and the better the sound quality. To choose the format and quality, just click on them.</p>
|
78 |
-
<h3>Step 4: Download the MP3 File</h3>
|
79 |
-
<p>The final step is to download the MP3 file to your device or cloud storage. You will see a green "Download Now" button next to the format and quality you chose. Click on it and a new tab will open with a countdown timer. Wait for a few seconds until the timer reaches zero and then click on the "Download" button that appears. The MP3 file will start downloading automatically. You can check the progress of the download in your browser or app. Once the download is complete, you can find the MP3 file in your default download folder or location. You can also rename or move it as you wish.</p>
|
80 |
-
<h2>Tips <h2>Tips and Tricks for Using 9xbuddy</h2>
|
81 |
-
<p>To make your experience with 9xbuddy even better, here are some tips and tricks that you can use:</p>
|
82 |
-
<h3>Tip 1: Use the Bookmarklet or Extension</h3>
|
83 |
-
<p>If you want to download music from YouTube faster and easier, you can use the bookmarklet or extension that 9xbuddy offers. The bookmarklet is a small piece of code that you can drag and drop to your browser's bookmarks bar. The extension is a small program that you can install to your browser. Both of them allow you to download music from YouTube with just one click, without having to copy and paste the URL or go to the 9xbuddy website. To use the bookmarklet or extension, you need to go to this page: https://9xbuddy.org/tools and follow the instructions there.</p>
|
84 |
-
<h3>Tip 2: Use the Batch Download Feature</h3>
|
85 |
-
<p>If you want to download multiple music files at once, you can use the batch download feature that 9xbuddy offers. This feature allows you to enter multiple URLs in one search box and download them all in one go. To use the batch download feature, you need to go to this page: https://9xbuddy.org/batch and follow the instructions there.</p>
|
86 |
-
<h3>Tip 3: Use the Playlist Download Feature</h3>
|
87 |
-
<p>If you want to download an entire playlist from YouTube, you can use the playlist download feature that 9xbuddy offers. This feature allows you to enter the URL of a YouTube playlist and download all the videos or audios in it in one go. To use the playlist download feature, you need to go to this page: https://9xbuddy.org/playlist and follow the instructions there.</p>
|
88 |
-
<h2>Alternatives to 9xbuddy</h2>
|
89 |
-
<p>Although 9xbuddy is a great tool for downloading music from YouTube, it is not the only one. There are some other websites or tools that can also do the same job. Here are some of them:</p>
|
90 |
-
<h3>Alternative 1: YTMP3</h3>
|
91 |
-
<p><strong>YTMP3</strong> is a simple and fast online service that allows you to convert and download YouTube videos into MP3 or MP4 files. You can use it to download music from YouTube in high quality (up to 320 kbps) and without any limitations. You don't need to install any software or register an account. You just need to copy and paste the URL of the YouTube video and click on the convert button. YTMP3 will do the rest for you. You can access YTMP3 here: https://ytmp3.cc/.</p>
|
92 |
-
<h3>Alternative 2: Snappea</h3>
|
93 |
-
<p><strong>Snappea</strong> is a versatile and powerful online tool that allows you to download videos and audios from various websites, including YouTube, Facebook, Instagram, TikTok, Dailymotion, etc. You can use it to download music from YouTube in various formats (MP3, MP4, M4A, etc.) and qualities (from 144p to 1080p). You don't need to install any software or register an account. You just need to copy and paste the URL of the video or audio and click on the download button. Snappea will do the rest for you. You can access Snappea here: https://www.snappea.com/.</p>
|
94 |
-
<h3>Alternative 3: MP3FY</h3>
|
95 |
-
<p><strong>MP3FY</strong> is a fast and easy online service that allows you to convert and download any video or audio from any website into MP3 files. You can use it to download music from YouTube in high quality (up to 320 kbps) and without any restrictions. You don't need to install any software or register an account. You just need to copy and paste the URL of the video or audio and click on the convert button. MP3FY will do the rest for you. You can access MP3FY here: https://mp3fy.com/.</p>
|
96 |
-
<h2>Conclusion</h2>
|
97 |
-
<p>In conclusion, downloading music from YouTube with 9xbuddy is a simple and convenient way to enjoy your favorite songs offline. You just need to follow four easy steps: copy the URL of the YouTube video, paste it into 9xbuddy, choose the MP3 format and quality, and download the file. You can also use some tips and tricks to enhance your experience with 9xbuddy, such as using the bookmarklet or extension, using the batch download feature, or using the playlist download feature. If you want more options, you can also try some alternatives to 9xbuddy, such as YTMP3, Snappea, or MP3FY.</p>
|
98 |
-
<p>We hope this <p>We hope this article has helped you learn how to download music from YouTube with 9xbuddy. Now you can enjoy your favorite songs anytime, anywhere, without any hassle. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.</p>
|
99 |
-
<h2>FAQs</h2>
|
100 |
-
<p>Here are some frequently asked questions and answers about 9xbuddy and downloading music from YouTube:</p>
|
101 |
-
<h3>Q: Is 9xbuddy safe and legal?</h3>
|
102 |
-
<p>A: 9xbuddy is safe and legal to use, as long as you use it for personal and non-commercial purposes. 9xbuddy does not host or store any content on its servers. It only acts as a mediator between the user and the source website. However, you should always respect the intellectual property rights of the original creators and owners of the content. You should not download or distribute any content that is protected by copyright or other laws.</p>
|
103 |
-
<h3>Q: How long does it take to download music from YouTube with 9xbuddy?</h3>
|
104 |
-
<p>A: The time it takes to download music from YouTube with 9xbuddy depends on several factors, such as the length and quality of the video, the speed of your internet connection, and the traffic on the website. Generally, it takes a few seconds to a few minutes to download a music file from YouTube with 9xbuddy.</p>
|
105 |
-
<h3>Q: How many music files can I download from YouTube with 9xbuddy?</h3>
|
106 |
-
<p>A: There is no limit to how many music files you can download from YouTube with 9xbuddy. You can download as many as you want, as long as you have enough space on your device or cloud storage. However, you should be mindful of the bandwidth and data usage that downloading music files can consume.</p>
|
107 |
-
<h3>Q: Can I download music from other websites besides YouTube with 9xbuddy?</h3>
|
108 |
-
<p>A: Yes, you can download music from other websites besides YouTube with 9xbuddy. 9xbuddy supports over 1000 websites, including Facebook, Instagram, Twitter, Vimeo, Dailymotion, SoundCloud, and more. You can use the same steps as downloading music from YouTube with 9xbuddy.</p>
|
109 |
-
<h3>Q: Can I download music from YouTube with 9xbuddy on my mobile device?</h3>
|
110 |
-
<p>A: Yes, you can download music from YouTube with 9xbuddy on your mobile device. 9xbuddy is compatible with all devices and platforms, including Android, iOS, Windows, Mac, Linux, etc. You can use any browser or app that supports web browsing to access 9xbuddy and download music from YouTube.</p> 401be4b1e0<br />
|
111 |
-
<br />
|
112 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/232labs/VToonify/vtoonify/model/dualstylegan.py
DELETED
@@ -1,203 +0,0 @@
|
|
1 |
-
import random
|
2 |
-
import torch
|
3 |
-
from torch import nn
|
4 |
-
from model.stylegan.model import ConvLayer, PixelNorm, EqualLinear, Generator
|
5 |
-
|
6 |
-
class AdaptiveInstanceNorm(nn.Module):
|
7 |
-
def __init__(self, fin, style_dim=512):
|
8 |
-
super().__init__()
|
9 |
-
|
10 |
-
self.norm = nn.InstanceNorm2d(fin, affine=False)
|
11 |
-
self.style = nn.Linear(style_dim, fin * 2)
|
12 |
-
|
13 |
-
self.style.bias.data[:fin] = 1
|
14 |
-
self.style.bias.data[fin:] = 0
|
15 |
-
|
16 |
-
def forward(self, input, style):
|
17 |
-
style = self.style(style).unsqueeze(2).unsqueeze(3)
|
18 |
-
gamma, beta = style.chunk(2, 1)
|
19 |
-
out = self.norm(input)
|
20 |
-
out = gamma * out + beta
|
21 |
-
return out
|
22 |
-
|
23 |
-
# modulative residual blocks (ModRes)
|
24 |
-
class AdaResBlock(nn.Module):
|
25 |
-
def __init__(self, fin, style_dim=512, dilation=1): # modified
|
26 |
-
super().__init__()
|
27 |
-
|
28 |
-
self.conv = ConvLayer(fin, fin, 3, dilation=dilation) # modified
|
29 |
-
self.conv2 = ConvLayer(fin, fin, 3, dilation=dilation) # modified
|
30 |
-
self.norm = AdaptiveInstanceNorm(fin, style_dim)
|
31 |
-
self.norm2 = AdaptiveInstanceNorm(fin, style_dim)
|
32 |
-
|
33 |
-
# model initialization
|
34 |
-
# the convolution filters are set to values close to 0 to produce negligible residual features
|
35 |
-
self.conv[0].weight.data *= 0.01
|
36 |
-
self.conv2[0].weight.data *= 0.01
|
37 |
-
|
38 |
-
def forward(self, x, s, w=1):
|
39 |
-
skip = x
|
40 |
-
if w == 0:
|
41 |
-
return skip
|
42 |
-
out = self.conv(self.norm(x, s))
|
43 |
-
out = self.conv2(self.norm2(out, s))
|
44 |
-
out = out * w + skip
|
45 |
-
return out
|
46 |
-
|
47 |
-
class DualStyleGAN(nn.Module):
|
48 |
-
def __init__(self, size, style_dim, n_mlp, channel_multiplier=2, twoRes=True, res_index=6):
|
49 |
-
super().__init__()
|
50 |
-
|
51 |
-
layers = [PixelNorm()]
|
52 |
-
for i in range(n_mlp-6):
|
53 |
-
layers.append(EqualLinear(512, 512, lr_mul=0.01, activation="fused_lrelu"))
|
54 |
-
# color transform blocks T_c
|
55 |
-
self.style = nn.Sequential(*layers)
|
56 |
-
# StyleGAN2
|
57 |
-
self.generator = Generator(size, style_dim, n_mlp, channel_multiplier)
|
58 |
-
# The extrinsic style path
|
59 |
-
self.res = nn.ModuleList()
|
60 |
-
self.res_index = res_index//2 * 2
|
61 |
-
self.res.append(AdaResBlock(self.generator.channels[2 ** 2])) # for conv1
|
62 |
-
for i in range(3, self.generator.log_size + 1):
|
63 |
-
out_channel = self.generator.channels[2 ** i]
|
64 |
-
if i < 3 + self.res_index//2:
|
65 |
-
# ModRes
|
66 |
-
self.res.append(AdaResBlock(out_channel))
|
67 |
-
self.res.append(AdaResBlock(out_channel))
|
68 |
-
else:
|
69 |
-
# structure transform block T_s
|
70 |
-
self.res.append(EqualLinear(512, 512))
|
71 |
-
# FC layer is initialized with identity matrices, meaning no changes to the input latent code
|
72 |
-
self.res[-1].weight.data = torch.eye(512) * 512.0**0.5 + torch.randn(512, 512) * 0.01
|
73 |
-
self.res.append(EqualLinear(512, 512))
|
74 |
-
self.res[-1].weight.data = torch.eye(512) * 512.0**0.5 + torch.randn(512, 512) * 0.01
|
75 |
-
self.res.append(EqualLinear(512, 512)) # for to_rgb7
|
76 |
-
self.res[-1].weight.data = torch.eye(512) * 512.0**0.5 + torch.randn(512, 512) * 0.01
|
77 |
-
self.size = self.generator.size
|
78 |
-
self.style_dim = self.generator.style_dim
|
79 |
-
self.log_size = self.generator.log_size
|
80 |
-
self.num_layers = self.generator.num_layers
|
81 |
-
self.n_latent = self.generator.n_latent
|
82 |
-
self.channels = self.generator.channels
|
83 |
-
|
84 |
-
def forward(
|
85 |
-
self,
|
86 |
-
styles, # intrinsic style code
|
87 |
-
exstyles, # extrinsic style code
|
88 |
-
return_latents=False,
|
89 |
-
return_feat=False,
|
90 |
-
inject_index=None,
|
91 |
-
truncation=1,
|
92 |
-
truncation_latent=None,
|
93 |
-
input_is_latent=False,
|
94 |
-
noise=None,
|
95 |
-
randomize_noise=True,
|
96 |
-
z_plus_latent=False, # intrinsic style code is z+ or z
|
97 |
-
use_res=True, # whether to use the extrinsic style path
|
98 |
-
fuse_index=18, # layers > fuse_index do not use the extrinsic style path
|
99 |
-
interp_weights=[1]*18, # weight vector for style combination of two paths
|
100 |
-
):
|
101 |
-
|
102 |
-
if not input_is_latent:
|
103 |
-
if not z_plus_latent:
|
104 |
-
styles = [self.generator.style(s) for s in styles]
|
105 |
-
else:
|
106 |
-
styles = [self.generator.style(s.reshape(s.shape[0]*s.shape[1], s.shape[2])).reshape(s.shape) for s in styles]
|
107 |
-
|
108 |
-
if noise is None:
|
109 |
-
if randomize_noise:
|
110 |
-
noise = [None] * self.generator.num_layers
|
111 |
-
else:
|
112 |
-
noise = [
|
113 |
-
getattr(self.generator.noises, f"noise_{i}") for i in range(self.generator.num_layers)
|
114 |
-
]
|
115 |
-
|
116 |
-
if truncation < 1:
|
117 |
-
style_t = []
|
118 |
-
|
119 |
-
for style in styles:
|
120 |
-
style_t.append(
|
121 |
-
truncation_latent + truncation * (style - truncation_latent)
|
122 |
-
)
|
123 |
-
|
124 |
-
styles = style_t
|
125 |
-
|
126 |
-
if len(styles) < 2:
|
127 |
-
inject_index = self.generator.n_latent
|
128 |
-
|
129 |
-
if styles[0].ndim < 3:
|
130 |
-
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
|
131 |
-
|
132 |
-
else:
|
133 |
-
latent = styles[0]
|
134 |
-
|
135 |
-
else:
|
136 |
-
if inject_index is None:
|
137 |
-
inject_index = random.randint(1, self.generator.n_latent - 1)
|
138 |
-
|
139 |
-
if styles[0].ndim < 3:
|
140 |
-
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
|
141 |
-
latent2 = styles[1].unsqueeze(1).repeat(1, self.generator.n_latent - inject_index, 1)
|
142 |
-
|
143 |
-
latent = torch.cat([latent, latent2], 1)
|
144 |
-
else:
|
145 |
-
latent = torch.cat([styles[0][:,0:inject_index], styles[1][:,inject_index:]], 1)
|
146 |
-
|
147 |
-
if use_res:
|
148 |
-
if exstyles.ndim < 3:
|
149 |
-
resstyles = self.style(exstyles).unsqueeze(1).repeat(1, self.generator.n_latent, 1)
|
150 |
-
adastyles = exstyles.unsqueeze(1).repeat(1, self.generator.n_latent, 1)
|
151 |
-
else:
|
152 |
-
nB, nL, nD = exstyles.shape
|
153 |
-
resstyles = self.style(exstyles.reshape(nB*nL, nD)).reshape(nB, nL, nD)
|
154 |
-
adastyles = exstyles
|
155 |
-
|
156 |
-
out = self.generator.input(latent)
|
157 |
-
out = self.generator.conv1(out, latent[:, 0], noise=noise[0])
|
158 |
-
if use_res and fuse_index > 0:
|
159 |
-
out = self.res[0](out, resstyles[:, 0], interp_weights[0])
|
160 |
-
|
161 |
-
skip = self.generator.to_rgb1(out, latent[:, 1])
|
162 |
-
i = 1
|
163 |
-
for conv1, conv2, noise1, noise2, to_rgb in zip(
|
164 |
-
self.generator.convs[::2], self.generator.convs[1::2], noise[1::2], noise[2::2], self.generator.to_rgbs):
|
165 |
-
if use_res and fuse_index >= i and i > self.res_index:
|
166 |
-
out = conv1(out, interp_weights[i] * self.res[i](adastyles[:, i]) +
|
167 |
-
(1-interp_weights[i]) * latent[:, i], noise=noise1)
|
168 |
-
else:
|
169 |
-
out = conv1(out, latent[:, i], noise=noise1)
|
170 |
-
if use_res and fuse_index >= i and i <= self.res_index:
|
171 |
-
out = self.res[i](out, resstyles[:, i], interp_weights[i])
|
172 |
-
if use_res and fuse_index >= (i+1) and i > self.res_index:
|
173 |
-
out = conv2(out, interp_weights[i+1] * self.res[i+1](adastyles[:, i+1]) +
|
174 |
-
(1-interp_weights[i+1]) * latent[:, i+1], noise=noise2)
|
175 |
-
else:
|
176 |
-
out = conv2(out, latent[:, i + 1], noise=noise2)
|
177 |
-
if use_res and fuse_index >= (i+1) and i <= self.res_index:
|
178 |
-
out = self.res[i+1](out, resstyles[:, i+1], interp_weights[i+1])
|
179 |
-
if use_res and fuse_index >= (i+2) and i >= self.res_index-1:
|
180 |
-
skip = to_rgb(out, interp_weights[i+2] * self.res[i+2](adastyles[:, i+2]) +
|
181 |
-
(1-interp_weights[i+2]) * latent[:, i + 2], skip)
|
182 |
-
else:
|
183 |
-
skip = to_rgb(out, latent[:, i + 2], skip)
|
184 |
-
i += 2
|
185 |
-
if i > self.res_index and return_feat:
|
186 |
-
return out, skip
|
187 |
-
|
188 |
-
image = skip
|
189 |
-
|
190 |
-
if return_latents:
|
191 |
-
return image, latent
|
192 |
-
|
193 |
-
else:
|
194 |
-
return image, None
|
195 |
-
|
196 |
-
def make_noise(self):
|
197 |
-
return self.generator.make_noise()
|
198 |
-
|
199 |
-
def mean_latent(self, n_latent):
|
200 |
-
return self.generator.mean_latent(n_latent)
|
201 |
-
|
202 |
-
def get_latent(self, input):
|
203 |
-
return self.generator.style(input)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/232labs/VToonify/vtoonify/model/raft/core/__init__.py
DELETED
File without changes
|
spaces/ANDRYHA/FakeNewsClassifier/app.py
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
|
2 |
-
from transformers import AutoModelForSequenceClassification
|
3 |
-
from transformers import AutoTokenizer
|
4 |
-
from langdetect import detect
|
5 |
-
from newspaper import Article
|
6 |
-
from PIL import Image
|
7 |
-
import streamlit as st
|
8 |
-
import requests
|
9 |
-
import torch
|
10 |
-
|
11 |
-
st.markdown("## Prediction of Fakeness by Given URL")
|
12 |
-
background = Image.open('logo.jpg')
|
13 |
-
st.image(background)
|
14 |
-
|
15 |
-
st.markdown(f"### Article URL")
|
16 |
-
text = st.text_area("Insert some url here",
|
17 |
-
value="https://en.globes.co.il/en/article-yandex-looks-to-expand-activities-in-israel-1001406519")
|
18 |
-
|
19 |
-
@st.cache(allow_output_mutation=True)
|
20 |
-
def get_models_and_tokenizers():
|
21 |
-
model_name = 'distilbert-base-uncased-finetuned-sst-2-english'
|
22 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)
|
23 |
-
model.eval()
|
24 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
25 |
-
model.load_state_dict(torch.load('./model.pth', map_location='cpu'))
|
26 |
-
|
27 |
-
model_name_translator = "facebook/wmt19-ru-en"
|
28 |
-
tokenizer_translator = FSMTTokenizer.from_pretrained(model_name_translator)
|
29 |
-
model_translator = FSMTForConditionalGeneration.from_pretrained(model_name_translator)
|
30 |
-
model_translator.eval()
|
31 |
-
return model, tokenizer, model_translator, tokenizer_translator
|
32 |
-
|
33 |
-
model, tokenizer, model_translator, tokenizer_translator = get_models_and_tokenizers()
|
34 |
-
|
35 |
-
article = Article(text)
|
36 |
-
article.download()
|
37 |
-
article.parse()
|
38 |
-
concated_text = article.title + '. ' + article.text
|
39 |
-
lang = detect(concated_text)
|
40 |
-
|
41 |
-
st.markdown(f"### Language detection")
|
42 |
-
|
43 |
-
if lang == 'ru':
|
44 |
-
st.markdown(f"The language of this article is {lang.upper()} so we translated it!")
|
45 |
-
with st.spinner('Waiting for translation'):
|
46 |
-
input_ids = tokenizer_translator.encode(concated_text,
|
47 |
-
return_tensors="pt", max_length=512, truncation=True)
|
48 |
-
outputs = model_translator.generate(input_ids)
|
49 |
-
decoded = tokenizer_translator.decode(outputs[0], skip_special_tokens=True)
|
50 |
-
st.markdown("### Translated Text")
|
51 |
-
st.markdown(f"{decoded[:777]}")
|
52 |
-
concated_text = decoded
|
53 |
-
else:
|
54 |
-
st.markdown(f"The language of this article for sure: {lang.upper()}!")
|
55 |
-
|
56 |
-
st.markdown("### Extracted Text")
|
57 |
-
st.markdown(f"{concated_text[:777]}")
|
58 |
-
|
59 |
-
tokens_info = tokenizer(concated_text, truncation=True, return_tensors="pt")
|
60 |
-
with torch.no_grad():
|
61 |
-
raw_predictions = model(**tokens_info)
|
62 |
-
softmaxed = int(torch.nn.functional.softmax(raw_predictions.logits[0], dim=0)[1] * 100)
|
63 |
-
st.markdown("### Fakeness Prediction")
|
64 |
-
st.progress(softmaxed)
|
65 |
-
st.markdown(f"This is fake by **{softmaxed}%**!")
|
66 |
-
if (softmaxed > 70):
|
67 |
-
st.error('We would not trust this text!')
|
68 |
-
elif (softmaxed > 40):
|
69 |
-
st.warning('We are not sure about this text!')
|
70 |
-
else:
|
71 |
-
st.success('We would trust this text!')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abhilashvj/planogram-compliance/models/common.py
DELETED
@@ -1,1268 +0,0 @@
|
|
1 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
2 |
-
"""
|
3 |
-
Common modules
|
4 |
-
"""
|
5 |
-
|
6 |
-
import ast
|
7 |
-
import contextlib
|
8 |
-
import json
|
9 |
-
import math
|
10 |
-
import platform
|
11 |
-
import warnings
|
12 |
-
import zipfile
|
13 |
-
from collections import OrderedDict, namedtuple
|
14 |
-
from copy import copy
|
15 |
-
from pathlib import Path
|
16 |
-
from urllib.parse import urlparse
|
17 |
-
|
18 |
-
import cv2
|
19 |
-
import numpy as np
|
20 |
-
import pandas as pd
|
21 |
-
import requests
|
22 |
-
import torch
|
23 |
-
import torch.nn as nn
|
24 |
-
from IPython.display import display
|
25 |
-
from PIL import Image
|
26 |
-
from torch.cuda import amp
|
27 |
-
|
28 |
-
from utils import TryExcept
|
29 |
-
from utils.dataloaders import exif_transpose, letterbox
|
30 |
-
from utils.general import (
|
31 |
-
LOGGER,
|
32 |
-
ROOT,
|
33 |
-
Profile,
|
34 |
-
check_requirements,
|
35 |
-
check_suffix,
|
36 |
-
check_version,
|
37 |
-
colorstr,
|
38 |
-
increment_path,
|
39 |
-
is_notebook,
|
40 |
-
make_divisible,
|
41 |
-
non_max_suppression,
|
42 |
-
scale_boxes,
|
43 |
-
xywh2xyxy,
|
44 |
-
xyxy2xywh,
|
45 |
-
yaml_load,
|
46 |
-
)
|
47 |
-
from utils.plots import Annotator, colors, save_one_box
|
48 |
-
from utils.torch_utils import copy_attr, smart_inference_mode
|
49 |
-
|
50 |
-
|
51 |
-
def autopad(k, p=None, d=1): # kernel, padding, dilation
|
52 |
-
# Pad to 'same' shape outputs
|
53 |
-
if d > 1:
|
54 |
-
k = (
|
55 |
-
d * (k - 1) + 1
|
56 |
-
if isinstance(k, int)
|
57 |
-
else [d * (x - 1) + 1 for x in k]
|
58 |
-
) # actual kernel-size
|
59 |
-
if p is None:
|
60 |
-
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
|
61 |
-
return p
|
62 |
-
|
63 |
-
|
64 |
-
class Conv(nn.Module):
|
65 |
-
# Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
|
66 |
-
default_act = nn.SiLU() # default activation
|
67 |
-
|
68 |
-
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
|
69 |
-
super().__init__()
|
70 |
-
self.conv = nn.Conv2d(
|
71 |
-
c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False
|
72 |
-
)
|
73 |
-
self.bn = nn.BatchNorm2d(c2)
|
74 |
-
self.act = (
|
75 |
-
self.default_act
|
76 |
-
if act is True
|
77 |
-
else act
|
78 |
-
if isinstance(act, nn.Module)
|
79 |
-
else nn.Identity()
|
80 |
-
)
|
81 |
-
|
82 |
-
def forward(self, x):
|
83 |
-
return self.act(self.bn(self.conv(x)))
|
84 |
-
|
85 |
-
def forward_fuse(self, x):
|
86 |
-
return self.act(self.conv(x))
|
87 |
-
|
88 |
-
|
89 |
-
class DWConv(Conv):
|
90 |
-
# Depth-wise convolution
|
91 |
-
def __init__(
|
92 |
-
self, c1, c2, k=1, s=1, d=1, act=True
|
93 |
-
): # ch_in, ch_out, kernel, stride, dilation, activation
|
94 |
-
super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
|
95 |
-
|
96 |
-
|
97 |
-
class DWConvTranspose2d(nn.ConvTranspose2d):
|
98 |
-
# Depth-wise transpose convolution
|
99 |
-
def __init__(
|
100 |
-
self, c1, c2, k=1, s=1, p1=0, p2=0
|
101 |
-
): # ch_in, ch_out, kernel, stride, padding, padding_out
|
102 |
-
super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
|
103 |
-
|
104 |
-
|
105 |
-
class TransformerLayer(nn.Module):
|
106 |
-
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
|
107 |
-
def __init__(self, c, num_heads):
|
108 |
-
super().__init__()
|
109 |
-
self.q = nn.Linear(c, c, bias=False)
|
110 |
-
self.k = nn.Linear(c, c, bias=False)
|
111 |
-
self.v = nn.Linear(c, c, bias=False)
|
112 |
-
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
|
113 |
-
self.fc1 = nn.Linear(c, c, bias=False)
|
114 |
-
self.fc2 = nn.Linear(c, c, bias=False)
|
115 |
-
|
116 |
-
def forward(self, x):
|
117 |
-
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
|
118 |
-
x = self.fc2(self.fc1(x)) + x
|
119 |
-
return x
|
120 |
-
|
121 |
-
|
122 |
-
class TransformerBlock(nn.Module):
|
123 |
-
# Vision Transformer https://arxiv.org/abs/2010.11929
|
124 |
-
def __init__(self, c1, c2, num_heads, num_layers):
|
125 |
-
super().__init__()
|
126 |
-
self.conv = None
|
127 |
-
if c1 != c2:
|
128 |
-
self.conv = Conv(c1, c2)
|
129 |
-
self.linear = nn.Linear(c2, c2) # learnable position embedding
|
130 |
-
self.tr = nn.Sequential(
|
131 |
-
*(TransformerLayer(c2, num_heads) for _ in range(num_layers))
|
132 |
-
)
|
133 |
-
self.c2 = c2
|
134 |
-
|
135 |
-
def forward(self, x):
|
136 |
-
if self.conv is not None:
|
137 |
-
x = self.conv(x)
|
138 |
-
b, _, w, h = x.shape
|
139 |
-
p = x.flatten(2).permute(2, 0, 1)
|
140 |
-
return (
|
141 |
-
self.tr(p + self.linear(p))
|
142 |
-
.permute(1, 2, 0)
|
143 |
-
.reshape(b, self.c2, w, h)
|
144 |
-
)
|
145 |
-
|
146 |
-
|
147 |
-
class Bottleneck(nn.Module):
|
148 |
-
# Standard bottleneck
|
149 |
-
def __init__(
|
150 |
-
self, c1, c2, shortcut=True, g=1, e=0.5
|
151 |
-
): # ch_in, ch_out, shortcut, groups, expansion
|
152 |
-
super().__init__()
|
153 |
-
c_ = int(c2 * e) # hidden channels
|
154 |
-
self.cv1 = Conv(c1, c_, 1, 1)
|
155 |
-
self.cv2 = Conv(c_, c2, 3, 1, g=g)
|
156 |
-
self.add = shortcut and c1 == c2
|
157 |
-
|
158 |
-
def forward(self, x):
|
159 |
-
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
|
160 |
-
|
161 |
-
|
162 |
-
class BottleneckCSP(nn.Module):
|
163 |
-
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
|
164 |
-
def __init__(
|
165 |
-
self, c1, c2, n=1, shortcut=True, g=1, e=0.5
|
166 |
-
): # ch_in, ch_out, number, shortcut, groups, expansion
|
167 |
-
super().__init__()
|
168 |
-
c_ = int(c2 * e) # hidden channels
|
169 |
-
self.cv1 = Conv(c1, c_, 1, 1)
|
170 |
-
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
|
171 |
-
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
|
172 |
-
self.cv4 = Conv(2 * c_, c2, 1, 1)
|
173 |
-
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
|
174 |
-
self.act = nn.SiLU()
|
175 |
-
self.m = nn.Sequential(
|
176 |
-
*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))
|
177 |
-
)
|
178 |
-
|
179 |
-
def forward(self, x):
|
180 |
-
y1 = self.cv3(self.m(self.cv1(x)))
|
181 |
-
y2 = self.cv2(x)
|
182 |
-
return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
|
183 |
-
|
184 |
-
|
185 |
-
class CrossConv(nn.Module):
|
186 |
-
# Cross Convolution Downsample
|
187 |
-
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
|
188 |
-
# ch_in, ch_out, kernel, stride, groups, expansion, shortcut
|
189 |
-
super().__init__()
|
190 |
-
c_ = int(c2 * e) # hidden channels
|
191 |
-
self.cv1 = Conv(c1, c_, (1, k), (1, s))
|
192 |
-
self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
|
193 |
-
self.add = shortcut and c1 == c2
|
194 |
-
|
195 |
-
def forward(self, x):
|
196 |
-
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
|
197 |
-
|
198 |
-
|
199 |
-
class C3(nn.Module):
|
200 |
-
# CSP Bottleneck with 3 convolutions
|
201 |
-
def __init__(
|
202 |
-
self, c1, c2, n=1, shortcut=True, g=1, e=0.5
|
203 |
-
): # ch_in, ch_out, number, shortcut, groups, expansion
|
204 |
-
super().__init__()
|
205 |
-
c_ = int(c2 * e) # hidden channels
|
206 |
-
self.cv1 = Conv(c1, c_, 1, 1)
|
207 |
-
self.cv2 = Conv(c1, c_, 1, 1)
|
208 |
-
self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2)
|
209 |
-
self.m = nn.Sequential(
|
210 |
-
*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))
|
211 |
-
)
|
212 |
-
|
213 |
-
def forward(self, x):
|
214 |
-
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
|
215 |
-
|
216 |
-
|
217 |
-
class C3x(C3):
|
218 |
-
# C3 module with cross-convolutions
|
219 |
-
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
|
220 |
-
super().__init__(c1, c2, n, shortcut, g, e)
|
221 |
-
c_ = int(c2 * e)
|
222 |
-
self.m = nn.Sequential(
|
223 |
-
*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))
|
224 |
-
)
|
225 |
-
|
226 |
-
|
227 |
-
class C3TR(C3):
|
228 |
-
# C3 module with TransformerBlock()
|
229 |
-
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
|
230 |
-
super().__init__(c1, c2, n, shortcut, g, e)
|
231 |
-
c_ = int(c2 * e)
|
232 |
-
self.m = TransformerBlock(c_, c_, 4, n)
|
233 |
-
|
234 |
-
|
235 |
-
class C3SPP(C3):
|
236 |
-
# C3 module with SPP()
|
237 |
-
def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
|
238 |
-
super().__init__(c1, c2, n, shortcut, g, e)
|
239 |
-
c_ = int(c2 * e)
|
240 |
-
self.m = SPP(c_, c_, k)
|
241 |
-
|
242 |
-
|
243 |
-
class C3Ghost(C3):
|
244 |
-
# C3 module with GhostBottleneck()
|
245 |
-
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
|
246 |
-
super().__init__(c1, c2, n, shortcut, g, e)
|
247 |
-
c_ = int(c2 * e) # hidden channels
|
248 |
-
self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
|
249 |
-
|
250 |
-
|
251 |
-
class SPP(nn.Module):
|
252 |
-
# Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
|
253 |
-
def __init__(self, c1, c2, k=(5, 9, 13)):
|
254 |
-
super().__init__()
|
255 |
-
c_ = c1 // 2 # hidden channels
|
256 |
-
self.cv1 = Conv(c1, c_, 1, 1)
|
257 |
-
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
|
258 |
-
self.m = nn.ModuleList(
|
259 |
-
[nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]
|
260 |
-
)
|
261 |
-
|
262 |
-
def forward(self, x):
|
263 |
-
x = self.cv1(x)
|
264 |
-
with warnings.catch_warnings():
|
265 |
-
warnings.simplefilter(
|
266 |
-
"ignore"
|
267 |
-
) # suppress torch 1.9.0 max_pool2d() warning
|
268 |
-
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
|
269 |
-
|
270 |
-
|
271 |
-
class SPPF(nn.Module):
|
272 |
-
# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
|
273 |
-
def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
|
274 |
-
super().__init__()
|
275 |
-
c_ = c1 // 2 # hidden channels
|
276 |
-
self.cv1 = Conv(c1, c_, 1, 1)
|
277 |
-
self.cv2 = Conv(c_ * 4, c2, 1, 1)
|
278 |
-
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
|
279 |
-
|
280 |
-
def forward(self, x):
|
281 |
-
x = self.cv1(x)
|
282 |
-
with warnings.catch_warnings():
|
283 |
-
warnings.simplefilter(
|
284 |
-
"ignore"
|
285 |
-
) # suppress torch 1.9.0 max_pool2d() warning
|
286 |
-
y1 = self.m(x)
|
287 |
-
y2 = self.m(y1)
|
288 |
-
return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
|
289 |
-
|
290 |
-
|
291 |
-
class Focus(nn.Module):
|
292 |
-
# Focus wh information into c-space
|
293 |
-
def __init__(
|
294 |
-
self, c1, c2, k=1, s=1, p=None, g=1, act=True
|
295 |
-
): # ch_in, ch_out, kernel, stride, padding, groups
|
296 |
-
super().__init__()
|
297 |
-
self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
|
298 |
-
# self.contract = Contract(gain=2)
|
299 |
-
|
300 |
-
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
|
301 |
-
return self.conv(
|
302 |
-
torch.cat(
|
303 |
-
(
|
304 |
-
x[..., ::2, ::2],
|
305 |
-
x[..., 1::2, ::2],
|
306 |
-
x[..., ::2, 1::2],
|
307 |
-
x[..., 1::2, 1::2],
|
308 |
-
),
|
309 |
-
1,
|
310 |
-
)
|
311 |
-
)
|
312 |
-
# return self.conv(self.contract(x))
|
313 |
-
|
314 |
-
|
315 |
-
class GhostConv(nn.Module):
|
316 |
-
# Ghost Convolution https://github.com/huawei-noah/ghostnet
|
317 |
-
def __init__(
|
318 |
-
self, c1, c2, k=1, s=1, g=1, act=True
|
319 |
-
): # ch_in, ch_out, kernel, stride, groups
|
320 |
-
super().__init__()
|
321 |
-
c_ = c2 // 2 # hidden channels
|
322 |
-
self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
|
323 |
-
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
|
324 |
-
|
325 |
-
def forward(self, x):
|
326 |
-
y = self.cv1(x)
|
327 |
-
return torch.cat((y, self.cv2(y)), 1)
|
328 |
-
|
329 |
-
|
330 |
-
class GhostBottleneck(nn.Module):
|
331 |
-
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
|
332 |
-
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
|
333 |
-
super().__init__()
|
334 |
-
c_ = c2 // 2
|
335 |
-
self.conv = nn.Sequential(
|
336 |
-
GhostConv(c1, c_, 1, 1), # pw
|
337 |
-
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
|
338 |
-
GhostConv(c_, c2, 1, 1, act=False),
|
339 |
-
) # pw-linear
|
340 |
-
self.shortcut = (
|
341 |
-
nn.Sequential(
|
342 |
-
DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False)
|
343 |
-
)
|
344 |
-
if s == 2
|
345 |
-
else nn.Identity()
|
346 |
-
)
|
347 |
-
|
348 |
-
def forward(self, x):
|
349 |
-
return self.conv(x) + self.shortcut(x)
|
350 |
-
|
351 |
-
|
352 |
-
class Contract(nn.Module):
|
353 |
-
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
|
354 |
-
def __init__(self, gain=2):
|
355 |
-
super().__init__()
|
356 |
-
self.gain = gain
|
357 |
-
|
358 |
-
def forward(self, x):
|
359 |
-
(
|
360 |
-
b,
|
361 |
-
c,
|
362 |
-
h,
|
363 |
-
w,
|
364 |
-
) = (
|
365 |
-
x.size()
|
366 |
-
) # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'
|
367 |
-
s = self.gain
|
368 |
-
x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
|
369 |
-
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
|
370 |
-
return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
|
371 |
-
|
372 |
-
|
373 |
-
class Expand(nn.Module):
|
374 |
-
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
|
375 |
-
def __init__(self, gain=2):
|
376 |
-
super().__init__()
|
377 |
-
self.gain = gain
|
378 |
-
|
379 |
-
def forward(self, x):
|
380 |
-
b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
|
381 |
-
s = self.gain
|
382 |
-
x = x.view(b, s, s, c // s**2, h, w) # x(1,2,2,16,80,80)
|
383 |
-
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
|
384 |
-
return x.view(b, c // s**2, h * s, w * s) # x(1,16,160,160)
|
385 |
-
|
386 |
-
|
387 |
-
class Concat(nn.Module):
|
388 |
-
# Concatenate a list of tensors along dimension
|
389 |
-
def __init__(self, dimension=1):
|
390 |
-
super().__init__()
|
391 |
-
self.d = dimension
|
392 |
-
|
393 |
-
def forward(self, x):
|
394 |
-
return torch.cat(x, self.d)
|
395 |
-
|
396 |
-
|
397 |
-
class DetectMultiBackend(nn.Module):
|
398 |
-
# YOLOv5 MultiBackend class for python inference on various backends
|
399 |
-
def __init__(
|
400 |
-
self,
|
401 |
-
weights="yolov5s.pt",
|
402 |
-
device=torch.device("cpu"),
|
403 |
-
dnn=False,
|
404 |
-
data=None,
|
405 |
-
fp16=False,
|
406 |
-
fuse=True,
|
407 |
-
):
|
408 |
-
# Usage:
|
409 |
-
# PyTorch: weights = *.pt
|
410 |
-
# TorchScript: *.torchscript
|
411 |
-
# ONNX Runtime: *.onnx
|
412 |
-
# ONNX OpenCV DNN: *.onnx --dnn
|
413 |
-
# OpenVINO: *_openvino_model
|
414 |
-
# CoreML: *.mlmodel
|
415 |
-
# TensorRT: *.engine
|
416 |
-
# TensorFlow SavedModel: *_saved_model
|
417 |
-
# TensorFlow GraphDef: *.pb
|
418 |
-
# TensorFlow Lite: *.tflite
|
419 |
-
# TensorFlow Edge TPU: *_edgetpu.tflite
|
420 |
-
# PaddlePaddle: *_paddle_model
|
421 |
-
from models.experimental import ( # scoped to avoid circular import
|
422 |
-
attempt_download,
|
423 |
-
attempt_load,
|
424 |
-
)
|
425 |
-
|
426 |
-
super().__init__()
|
427 |
-
w = str(weights[0] if isinstance(weights, list) else weights)
|
428 |
-
(
|
429 |
-
pt,
|
430 |
-
jit,
|
431 |
-
onnx,
|
432 |
-
xml,
|
433 |
-
engine,
|
434 |
-
coreml,
|
435 |
-
saved_model,
|
436 |
-
pb,
|
437 |
-
tflite,
|
438 |
-
edgetpu,
|
439 |
-
tfjs,
|
440 |
-
paddle,
|
441 |
-
triton,
|
442 |
-
) = self._model_type(w)
|
443 |
-
fp16 &= pt or jit or onnx or engine # FP16
|
444 |
-
nhwc = (
|
445 |
-
coreml or saved_model or pb or tflite or edgetpu
|
446 |
-
) # BHWC formats (vs torch BCWH)
|
447 |
-
stride = 32 # default stride
|
448 |
-
cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA
|
449 |
-
if not (pt or triton):
|
450 |
-
w = attempt_download(w) # download if not local
|
451 |
-
|
452 |
-
if pt: # PyTorch
|
453 |
-
model = attempt_load(
|
454 |
-
weights if isinstance(weights, list) else w,
|
455 |
-
device=device,
|
456 |
-
inplace=True,
|
457 |
-
fuse=fuse,
|
458 |
-
)
|
459 |
-
stride = max(int(model.stride.max()), 32) # model stride
|
460 |
-
names = (
|
461 |
-
model.module.names if hasattr(model, "module") else model.names
|
462 |
-
) # get class names
|
463 |
-
model.half() if fp16 else model.float()
|
464 |
-
self.model = (
|
465 |
-
model # explicitly assign for to(), cpu(), cuda(), half()
|
466 |
-
)
|
467 |
-
elif jit: # TorchScript
|
468 |
-
LOGGER.info(f"Loading {w} for TorchScript inference...")
|
469 |
-
extra_files = {"config.txt": ""} # model metadata
|
470 |
-
model = torch.jit.load(
|
471 |
-
w, _extra_files=extra_files, map_location=device
|
472 |
-
)
|
473 |
-
model.half() if fp16 else model.float()
|
474 |
-
if extra_files["config.txt"]: # load metadata dict
|
475 |
-
d = json.loads(
|
476 |
-
extra_files["config.txt"],
|
477 |
-
object_hook=lambda d: {
|
478 |
-
int(k) if k.isdigit() else k: v for k, v in d.items()
|
479 |
-
},
|
480 |
-
)
|
481 |
-
stride, names = int(d["stride"]), d["names"]
|
482 |
-
elif dnn: # ONNX OpenCV DNN
|
483 |
-
LOGGER.info(f"Loading {w} for ONNX OpenCV DNN inference...")
|
484 |
-
check_requirements("opencv-python>=4.5.4")
|
485 |
-
net = cv2.dnn.readNetFromONNX(w)
|
486 |
-
elif onnx: # ONNX Runtime
|
487 |
-
LOGGER.info(f"Loading {w} for ONNX Runtime inference...")
|
488 |
-
check_requirements(
|
489 |
-
("onnx", "onnxruntime-gpu" if cuda else "onnxruntime")
|
490 |
-
)
|
491 |
-
import onnxruntime
|
492 |
-
|
493 |
-
providers = (
|
494 |
-
["CUDAExecutionProvider", "CPUExecutionProvider"]
|
495 |
-
if cuda
|
496 |
-
else ["CPUExecutionProvider"]
|
497 |
-
)
|
498 |
-
session = onnxruntime.InferenceSession(w, providers=providers)
|
499 |
-
output_names = [x.name for x in session.get_outputs()]
|
500 |
-
meta = session.get_modelmeta().custom_metadata_map # metadata
|
501 |
-
if "stride" in meta:
|
502 |
-
stride, names = int(meta["stride"]), eval(meta["names"])
|
503 |
-
elif xml: # OpenVINO
|
504 |
-
LOGGER.info(f"Loading {w} for OpenVINO inference...")
|
505 |
-
check_requirements(
|
506 |
-
"openvino"
|
507 |
-
) # requires openvino-dev: https://pypi.org/project/openvino-dev/
|
508 |
-
from openvino.runtime import Core, Layout, get_batch
|
509 |
-
|
510 |
-
ie = Core()
|
511 |
-
if not Path(w).is_file(): # if not *.xml
|
512 |
-
w = next(
|
513 |
-
Path(w).glob("*.xml")
|
514 |
-
) # get *.xml file from *_openvino_model dir
|
515 |
-
network = ie.read_model(
|
516 |
-
model=w, weights=Path(w).with_suffix(".bin")
|
517 |
-
)
|
518 |
-
if network.get_parameters()[0].get_layout().empty:
|
519 |
-
network.get_parameters()[0].set_layout(Layout("NCHW"))
|
520 |
-
batch_dim = get_batch(network)
|
521 |
-
if batch_dim.is_static:
|
522 |
-
batch_size = batch_dim.get_length()
|
523 |
-
executable_network = ie.compile_model(
|
524 |
-
network, device_name="CPU"
|
525 |
-
) # device_name="MYRIAD" for Intel NCS2
|
526 |
-
stride, names = self._load_metadata(
|
527 |
-
Path(w).with_suffix(".yaml")
|
528 |
-
) # load metadata
|
529 |
-
elif engine: # TensorRT
|
530 |
-
LOGGER.info(f"Loading {w} for TensorRT inference...")
|
531 |
-
import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
|
532 |
-
|
533 |
-
check_version(
|
534 |
-
trt.__version__, "7.0.0", hard=True
|
535 |
-
) # require tensorrt>=7.0.0
|
536 |
-
if device.type == "cpu":
|
537 |
-
device = torch.device("cuda:0")
|
538 |
-
Binding = namedtuple(
|
539 |
-
"Binding", ("name", "dtype", "shape", "data", "ptr")
|
540 |
-
)
|
541 |
-
logger = trt.Logger(trt.Logger.INFO)
|
542 |
-
with open(w, "rb") as f, trt.Runtime(logger) as runtime:
|
543 |
-
model = runtime.deserialize_cuda_engine(f.read())
|
544 |
-
context = model.create_execution_context()
|
545 |
-
bindings = OrderedDict()
|
546 |
-
output_names = []
|
547 |
-
fp16 = False # default updated below
|
548 |
-
dynamic = False
|
549 |
-
for i in range(model.num_bindings):
|
550 |
-
name = model.get_binding_name(i)
|
551 |
-
dtype = trt.nptype(model.get_binding_dtype(i))
|
552 |
-
if model.binding_is_input(i):
|
553 |
-
if -1 in tuple(model.get_binding_shape(i)): # dynamic
|
554 |
-
dynamic = True
|
555 |
-
context.set_binding_shape(
|
556 |
-
i, tuple(model.get_profile_shape(0, i)[2])
|
557 |
-
)
|
558 |
-
if dtype == np.float16:
|
559 |
-
fp16 = True
|
560 |
-
else: # output
|
561 |
-
output_names.append(name)
|
562 |
-
shape = tuple(context.get_binding_shape(i))
|
563 |
-
im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
|
564 |
-
bindings[name] = Binding(
|
565 |
-
name, dtype, shape, im, int(im.data_ptr())
|
566 |
-
)
|
567 |
-
binding_addrs = OrderedDict(
|
568 |
-
(n, d.ptr) for n, d in bindings.items()
|
569 |
-
)
|
570 |
-
batch_size = bindings["images"].shape[
|
571 |
-
0
|
572 |
-
] # if dynamic, this is instead max batch size
|
573 |
-
elif coreml: # CoreML
|
574 |
-
LOGGER.info(f"Loading {w} for CoreML inference...")
|
575 |
-
import coremltools as ct
|
576 |
-
|
577 |
-
model = ct.models.MLModel(w)
|
578 |
-
elif saved_model: # TF SavedModel
|
579 |
-
LOGGER.info(f"Loading {w} for TensorFlow SavedModel inference...")
|
580 |
-
import tensorflow as tf
|
581 |
-
|
582 |
-
keras = False # assume TF1 saved_model
|
583 |
-
model = (
|
584 |
-
tf.keras.models.load_model(w)
|
585 |
-
if keras
|
586 |
-
else tf.saved_model.load(w)
|
587 |
-
)
|
588 |
-
elif (
|
589 |
-
pb
|
590 |
-
): # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
|
591 |
-
LOGGER.info(f"Loading {w} for TensorFlow GraphDef inference...")
|
592 |
-
import tensorflow as tf
|
593 |
-
|
594 |
-
def wrap_frozen_graph(gd, inputs, outputs):
|
595 |
-
x = tf.compat.v1.wrap_function(
|
596 |
-
lambda: tf.compat.v1.import_graph_def(gd, name=""), []
|
597 |
-
) # wrapped
|
598 |
-
ge = x.graph.as_graph_element
|
599 |
-
return x.prune(
|
600 |
-
tf.nest.map_structure(ge, inputs),
|
601 |
-
tf.nest.map_structure(ge, outputs),
|
602 |
-
)
|
603 |
-
|
604 |
-
def gd_outputs(gd):
|
605 |
-
name_list, input_list = [], []
|
606 |
-
for (
|
607 |
-
node
|
608 |
-
) in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef
|
609 |
-
name_list.append(node.name)
|
610 |
-
input_list.extend(node.input)
|
611 |
-
return sorted(
|
612 |
-
f"{x}:0"
|
613 |
-
for x in list(set(name_list) - set(input_list))
|
614 |
-
if not x.startswith("NoOp")
|
615 |
-
)
|
616 |
-
|
617 |
-
gd = tf.Graph().as_graph_def() # TF GraphDef
|
618 |
-
with open(w, "rb") as f:
|
619 |
-
gd.ParseFromString(f.read())
|
620 |
-
frozen_func = wrap_frozen_graph(
|
621 |
-
gd, inputs="x:0", outputs=gd_outputs(gd)
|
622 |
-
)
|
623 |
-
elif (
|
624 |
-
tflite or edgetpu
|
625 |
-
): # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
|
626 |
-
try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
|
627 |
-
from tflite_runtime.interpreter import Interpreter, load_delegate
|
628 |
-
except ImportError:
|
629 |
-
import tensorflow as tf
|
630 |
-
|
631 |
-
Interpreter, load_delegate = (
|
632 |
-
tf.lite.Interpreter,
|
633 |
-
tf.lite.experimental.load_delegate,
|
634 |
-
)
|
635 |
-
if (
|
636 |
-
edgetpu
|
637 |
-
): # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
|
638 |
-
LOGGER.info(
|
639 |
-
f"Loading {w} for TensorFlow Lite Edge TPU inference..."
|
640 |
-
)
|
641 |
-
delegate = {
|
642 |
-
"Linux": "libedgetpu.so.1",
|
643 |
-
"Darwin": "libedgetpu.1.dylib",
|
644 |
-
"Windows": "edgetpu.dll",
|
645 |
-
}[platform.system()]
|
646 |
-
interpreter = Interpreter(
|
647 |
-
model_path=w,
|
648 |
-
experimental_delegates=[load_delegate(delegate)],
|
649 |
-
)
|
650 |
-
else: # TFLite
|
651 |
-
LOGGER.info(f"Loading {w} for TensorFlow Lite inference...")
|
652 |
-
interpreter = Interpreter(model_path=w) # load TFLite model
|
653 |
-
interpreter.allocate_tensors() # allocate
|
654 |
-
input_details = interpreter.get_input_details() # inputs
|
655 |
-
output_details = interpreter.get_output_details() # outputs
|
656 |
-
# load metadata
|
657 |
-
with contextlib.suppress(zipfile.BadZipFile):
|
658 |
-
with zipfile.ZipFile(w, "r") as model:
|
659 |
-
meta_file = model.namelist()[0]
|
660 |
-
meta = ast.literal_eval(
|
661 |
-
model.read(meta_file).decode("utf-8")
|
662 |
-
)
|
663 |
-
stride, names = int(meta["stride"]), meta["names"]
|
664 |
-
elif tfjs: # TF.js
|
665 |
-
raise NotImplementedError(
|
666 |
-
"ERROR: YOLOv5 TF.js inference is not supported"
|
667 |
-
)
|
668 |
-
elif paddle: # PaddlePaddle
|
669 |
-
LOGGER.info(f"Loading {w} for PaddlePaddle inference...")
|
670 |
-
check_requirements("paddlepaddle-gpu" if cuda else "paddlepaddle")
|
671 |
-
import paddle.inference as pdi
|
672 |
-
|
673 |
-
if not Path(w).is_file(): # if not *.pdmodel
|
674 |
-
w = next(
|
675 |
-
Path(w).rglob("*.pdmodel")
|
676 |
-
) # get *.pdmodel file from *_paddle_model dir
|
677 |
-
weights = Path(w).with_suffix(".pdiparams")
|
678 |
-
config = pdi.Config(str(w), str(weights))
|
679 |
-
if cuda:
|
680 |
-
config.enable_use_gpu(
|
681 |
-
memory_pool_init_size_mb=2048, device_id=0
|
682 |
-
)
|
683 |
-
predictor = pdi.create_predictor(config)
|
684 |
-
input_handle = predictor.get_input_handle(
|
685 |
-
predictor.get_input_names()[0]
|
686 |
-
)
|
687 |
-
output_names = predictor.get_output_names()
|
688 |
-
elif triton: # NVIDIA Triton Inference Server
|
689 |
-
LOGGER.info(f"Using {w} as Triton Inference Server...")
|
690 |
-
check_requirements("tritonclient[all]")
|
691 |
-
from utils.triton import TritonRemoteModel
|
692 |
-
|
693 |
-
model = TritonRemoteModel(url=w)
|
694 |
-
nhwc = model.runtime.startswith("tensorflow")
|
695 |
-
else:
|
696 |
-
raise NotImplementedError(f"ERROR: {w} is not a supported format")
|
697 |
-
|
698 |
-
# class names
|
699 |
-
if "names" not in locals():
|
700 |
-
names = (
|
701 |
-
yaml_load(data)["names"]
|
702 |
-
if data
|
703 |
-
else {i: f"class{i}" for i in range(999)}
|
704 |
-
)
|
705 |
-
if names[0] == "n01440764" and len(names) == 1000: # ImageNet
|
706 |
-
names = yaml_load(ROOT / "data/ImageNet.yaml")[
|
707 |
-
"names"
|
708 |
-
] # human-readable names
|
709 |
-
|
710 |
-
self.__dict__.update(locals()) # assign all variables to self
|
711 |
-
|
712 |
-
def forward(self, im, augment=False, visualize=False):
|
713 |
-
# YOLOv5 MultiBackend inference
|
714 |
-
b, ch, h, w = im.shape # batch, channel, height, width
|
715 |
-
if self.fp16 and im.dtype != torch.float16:
|
716 |
-
im = im.half() # to FP16
|
717 |
-
if self.nhwc:
|
718 |
-
im = im.permute(
|
719 |
-
0, 2, 3, 1
|
720 |
-
) # torch BCHW to numpy BHWC shape(1,320,192,3)
|
721 |
-
|
722 |
-
if self.pt: # PyTorch
|
723 |
-
y = (
|
724 |
-
self.model(im, augment=augment, visualize=visualize)
|
725 |
-
if augment or visualize
|
726 |
-
else self.model(im)
|
727 |
-
)
|
728 |
-
elif self.jit: # TorchScript
|
729 |
-
y = self.model(im)
|
730 |
-
elif self.dnn: # ONNX OpenCV DNN
|
731 |
-
im = im.cpu().numpy() # torch to numpy
|
732 |
-
self.net.setInput(im)
|
733 |
-
y = self.net.forward()
|
734 |
-
elif self.onnx: # ONNX Runtime
|
735 |
-
im = im.cpu().numpy() # torch to numpy
|
736 |
-
y = self.session.run(
|
737 |
-
self.output_names, {self.session.get_inputs()[0].name: im}
|
738 |
-
)
|
739 |
-
elif self.xml: # OpenVINO
|
740 |
-
im = im.cpu().numpy() # FP32
|
741 |
-
y = list(self.executable_network([im]).values())
|
742 |
-
elif self.engine: # TensorRT
|
743 |
-
if self.dynamic and im.shape != self.bindings["images"].shape:
|
744 |
-
i = self.model.get_binding_index("images")
|
745 |
-
self.context.set_binding_shape(
|
746 |
-
i, im.shape
|
747 |
-
) # reshape if dynamic
|
748 |
-
self.bindings["images"] = self.bindings["images"]._replace(
|
749 |
-
shape=im.shape
|
750 |
-
)
|
751 |
-
for name in self.output_names:
|
752 |
-
i = self.model.get_binding_index(name)
|
753 |
-
self.bindings[name].data.resize_(
|
754 |
-
tuple(self.context.get_binding_shape(i))
|
755 |
-
)
|
756 |
-
s = self.bindings["images"].shape
|
757 |
-
assert (
|
758 |
-
im.shape == s
|
759 |
-
), f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
|
760 |
-
self.binding_addrs["images"] = int(im.data_ptr())
|
761 |
-
self.context.execute_v2(list(self.binding_addrs.values()))
|
762 |
-
y = [self.bindings[x].data for x in sorted(self.output_names)]
|
763 |
-
elif self.coreml: # CoreML
|
764 |
-
im = im.cpu().numpy()
|
765 |
-
im = Image.fromarray((im[0] * 255).astype("uint8"))
|
766 |
-
# im = im.resize((192, 320), Image.ANTIALIAS)
|
767 |
-
y = self.model.predict(
|
768 |
-
{"image": im}
|
769 |
-
) # coordinates are xywh normalized
|
770 |
-
if "confidence" in y:
|
771 |
-
box = xywh2xyxy(
|
772 |
-
y["coordinates"] * [[w, h, w, h]]
|
773 |
-
) # xyxy pixels
|
774 |
-
conf, cls = y["confidence"].max(1), y["confidence"].argmax(
|
775 |
-
1
|
776 |
-
).astype(np.float)
|
777 |
-
y = np.concatenate(
|
778 |
-
(box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1
|
779 |
-
)
|
780 |
-
else:
|
781 |
-
y = list(
|
782 |
-
reversed(y.values())
|
783 |
-
) # reversed for segmentation models (pred, proto)
|
784 |
-
elif self.paddle: # PaddlePaddle
|
785 |
-
im = im.cpu().numpy().astype(np.float32)
|
786 |
-
self.input_handle.copy_from_cpu(im)
|
787 |
-
self.predictor.run()
|
788 |
-
y = [
|
789 |
-
self.predictor.get_output_handle(x).copy_to_cpu()
|
790 |
-
for x in self.output_names
|
791 |
-
]
|
792 |
-
elif self.triton: # NVIDIA Triton Inference Server
|
793 |
-
y = self.model(im)
|
794 |
-
else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
|
795 |
-
im = im.cpu().numpy()
|
796 |
-
if self.saved_model: # SavedModel
|
797 |
-
y = (
|
798 |
-
self.model(im, training=False)
|
799 |
-
if self.keras
|
800 |
-
else self.model(im)
|
801 |
-
)
|
802 |
-
elif self.pb: # GraphDef
|
803 |
-
y = self.frozen_func(x=self.tf.constant(im))
|
804 |
-
else: # Lite or Edge TPU
|
805 |
-
input = self.input_details[0]
|
806 |
-
int8 = (
|
807 |
-
input["dtype"] == np.uint8
|
808 |
-
) # is TFLite quantized uint8 model
|
809 |
-
if int8:
|
810 |
-
scale, zero_point = input["quantization"]
|
811 |
-
im = (im / scale + zero_point).astype(np.uint8) # de-scale
|
812 |
-
self.interpreter.set_tensor(input["index"], im)
|
813 |
-
self.interpreter.invoke()
|
814 |
-
y = []
|
815 |
-
for output in self.output_details:
|
816 |
-
x = self.interpreter.get_tensor(output["index"])
|
817 |
-
if int8:
|
818 |
-
scale, zero_point = output["quantization"]
|
819 |
-
x = (
|
820 |
-
x.astype(np.float32) - zero_point
|
821 |
-
) * scale # re-scale
|
822 |
-
y.append(x)
|
823 |
-
y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
|
824 |
-
y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels
|
825 |
-
|
826 |
-
if isinstance(y, (list, tuple)):
|
827 |
-
return (
|
828 |
-
self.from_numpy(y[0])
|
829 |
-
if len(y) == 1
|
830 |
-
else [self.from_numpy(x) for x in y]
|
831 |
-
)
|
832 |
-
else:
|
833 |
-
return self.from_numpy(y)
|
834 |
-
|
835 |
-
def from_numpy(self, x):
|
836 |
-
return (
|
837 |
-
torch.from_numpy(x).to(self.device)
|
838 |
-
if isinstance(x, np.ndarray)
|
839 |
-
else x
|
840 |
-
)
|
841 |
-
|
842 |
-
def warmup(self, imgsz=(1, 3, 640, 640)):
|
843 |
-
# Warmup model by running inference once
|
844 |
-
warmup_types = (
|
845 |
-
self.pt,
|
846 |
-
self.jit,
|
847 |
-
self.onnx,
|
848 |
-
self.engine,
|
849 |
-
self.saved_model,
|
850 |
-
self.pb,
|
851 |
-
self.triton,
|
852 |
-
)
|
853 |
-
if any(warmup_types) and (self.device.type != "cpu" or self.triton):
|
854 |
-
im = torch.empty(
|
855 |
-
*imgsz,
|
856 |
-
dtype=torch.half if self.fp16 else torch.float,
|
857 |
-
device=self.device,
|
858 |
-
) # input
|
859 |
-
for _ in range(2 if self.jit else 1): #
|
860 |
-
self.forward(im) # warmup
|
861 |
-
|
862 |
-
@staticmethod
|
863 |
-
def _model_type(p="path/to/model.pt"):
|
864 |
-
# Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
|
865 |
-
# types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
|
866 |
-
from export import export_formats
|
867 |
-
from utils.downloads import is_url
|
868 |
-
|
869 |
-
sf = list(export_formats().Suffix) # export suffixes
|
870 |
-
if not is_url(p, check=False):
|
871 |
-
check_suffix(p, sf) # checks
|
872 |
-
url = urlparse(p) # if url may be Triton inference server
|
873 |
-
types = [s in Path(p).name for s in sf]
|
874 |
-
types[8] &= not types[9] # tflite &= not edgetpu
|
875 |
-
triton = not any(types) and all(
|
876 |
-
[any(s in url.scheme for s in ["http", "grpc"]), url.netloc]
|
877 |
-
)
|
878 |
-
return types + [triton]
|
879 |
-
|
880 |
-
@staticmethod
|
881 |
-
def _load_metadata(f=Path("path/to/meta.yaml")):
|
882 |
-
# Load metadata from meta.yaml if it exists
|
883 |
-
if f.exists():
|
884 |
-
d = yaml_load(f)
|
885 |
-
return d["stride"], d["names"] # assign stride, names
|
886 |
-
return None, None
|
887 |
-
|
888 |
-
|
889 |
-
class AutoShape(nn.Module):
|
890 |
-
# YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
|
891 |
-
conf = 0.25 # NMS confidence threshold
|
892 |
-
iou = 0.45 # NMS IoU threshold
|
893 |
-
agnostic = False # NMS class-agnostic
|
894 |
-
multi_label = False # NMS multiple labels per box
|
895 |
-
classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
|
896 |
-
max_det = 1000 # maximum number of detections per image
|
897 |
-
amp = False # Automatic Mixed Precision (AMP) inference
|
898 |
-
|
899 |
-
def __init__(self, model, verbose=True):
|
900 |
-
super().__init__()
|
901 |
-
if verbose:
|
902 |
-
LOGGER.info("Adding AutoShape... ")
|
903 |
-
copy_attr(
|
904 |
-
self,
|
905 |
-
model,
|
906 |
-
include=("yaml", "nc", "hyp", "names", "stride", "abc"),
|
907 |
-
exclude=(),
|
908 |
-
) # copy attributes
|
909 |
-
self.dmb = isinstance(
|
910 |
-
model, DetectMultiBackend
|
911 |
-
) # DetectMultiBackend() instance
|
912 |
-
self.pt = not self.dmb or model.pt # PyTorch model
|
913 |
-
self.model = model.eval()
|
914 |
-
if self.pt:
|
915 |
-
m = (
|
916 |
-
self.model.model.model[-1]
|
917 |
-
if self.dmb
|
918 |
-
else self.model.model[-1]
|
919 |
-
) # Detect()
|
920 |
-
m.inplace = (
|
921 |
-
False # Detect.inplace=False for safe multithread inference
|
922 |
-
)
|
923 |
-
m.export = True # do not output loss values
|
924 |
-
|
925 |
-
def _apply(self, fn):
|
926 |
-
# Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
|
927 |
-
self = super()._apply(fn)
|
928 |
-
if self.pt:
|
929 |
-
m = (
|
930 |
-
self.model.model.model[-1]
|
931 |
-
if self.dmb
|
932 |
-
else self.model.model[-1]
|
933 |
-
) # Detect()
|
934 |
-
m.stride = fn(m.stride)
|
935 |
-
m.grid = list(map(fn, m.grid))
|
936 |
-
if isinstance(m.anchor_grid, list):
|
937 |
-
m.anchor_grid = list(map(fn, m.anchor_grid))
|
938 |
-
return self
|
939 |
-
|
940 |
-
@smart_inference_mode()
|
941 |
-
def forward(self, ims, size=640, augment=False, profile=False):
|
942 |
-
# Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
|
943 |
-
# file: ims = 'data/images/zidane.jpg' # str or PosixPath
|
944 |
-
# URI: = 'https://ultralytics.com/images/zidane.jpg'
|
945 |
-
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
|
946 |
-
# PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
|
947 |
-
# numpy: = np.zeros((640,1280,3)) # HWC
|
948 |
-
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
|
949 |
-
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
|
950 |
-
|
951 |
-
dt = (Profile(), Profile(), Profile())
|
952 |
-
with dt[0]:
|
953 |
-
if isinstance(size, int): # expand
|
954 |
-
size = (size, size)
|
955 |
-
p = (
|
956 |
-
next(self.model.parameters())
|
957 |
-
if self.pt
|
958 |
-
else torch.empty(1, device=self.model.device)
|
959 |
-
) # param
|
960 |
-
autocast = self.amp and (
|
961 |
-
p.device.type != "cpu"
|
962 |
-
) # Automatic Mixed Precision (AMP) inference
|
963 |
-
if isinstance(ims, torch.Tensor): # torch
|
964 |
-
with amp.autocast(autocast):
|
965 |
-
return self.model(
|
966 |
-
ims.to(p.device).type_as(p), augment=augment
|
967 |
-
) # inference
|
968 |
-
|
969 |
-
# Pre-process
|
970 |
-
n, ims = (
|
971 |
-
(len(ims), list(ims))
|
972 |
-
if isinstance(ims, (list, tuple))
|
973 |
-
else (1, [ims])
|
974 |
-
) # number, list of images
|
975 |
-
shape0, shape1, files = (
|
976 |
-
[],
|
977 |
-
[],
|
978 |
-
[],
|
979 |
-
) # image and inference shapes, filenames
|
980 |
-
for i, im in enumerate(ims):
|
981 |
-
f = f"image{i}" # filename
|
982 |
-
if isinstance(im, (str, Path)): # filename or uri
|
983 |
-
im, f = (
|
984 |
-
Image.open(
|
985 |
-
requests.get(im, stream=True).raw
|
986 |
-
if str(im).startswith("http")
|
987 |
-
else im
|
988 |
-
),
|
989 |
-
im,
|
990 |
-
)
|
991 |
-
im = np.asarray(exif_transpose(im))
|
992 |
-
elif isinstance(im, Image.Image): # PIL Image
|
993 |
-
im, f = (
|
994 |
-
np.asarray(exif_transpose(im)),
|
995 |
-
getattr(im, "filename", f) or f,
|
996 |
-
)
|
997 |
-
files.append(Path(f).with_suffix(".jpg").name)
|
998 |
-
if im.shape[0] < 5: # image in CHW
|
999 |
-
im = im.transpose(
|
1000 |
-
(1, 2, 0)
|
1001 |
-
) # reverse dataloader .transpose(2, 0, 1)
|
1002 |
-
im = (
|
1003 |
-
im[..., :3]
|
1004 |
-
if im.ndim == 3
|
1005 |
-
else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
|
1006 |
-
) # enforce 3ch input
|
1007 |
-
s = im.shape[:2] # HWC
|
1008 |
-
shape0.append(s) # image shape
|
1009 |
-
g = max(size) / max(s) # gain
|
1010 |
-
shape1.append([int(y * g) for y in s])
|
1011 |
-
ims[i] = (
|
1012 |
-
im if im.data.contiguous else np.ascontiguousarray(im)
|
1013 |
-
) # update
|
1014 |
-
shape1 = [
|
1015 |
-
make_divisible(x, self.stride) for x in np.array(shape1).max(0)
|
1016 |
-
] # inf shape
|
1017 |
-
x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad
|
1018 |
-
x = np.ascontiguousarray(
|
1019 |
-
np.array(x).transpose((0, 3, 1, 2))
|
1020 |
-
) # stack and BHWC to BCHW
|
1021 |
-
x = (
|
1022 |
-
torch.from_numpy(x).to(p.device).type_as(p) / 255
|
1023 |
-
) # uint8 to fp16/32
|
1024 |
-
|
1025 |
-
with amp.autocast(autocast):
|
1026 |
-
# Inference
|
1027 |
-
with dt[1]:
|
1028 |
-
y = self.model(x, augment=augment) # forward
|
1029 |
-
|
1030 |
-
# Post-process
|
1031 |
-
with dt[2]:
|
1032 |
-
y = non_max_suppression(
|
1033 |
-
y if self.dmb else y[0],
|
1034 |
-
self.conf,
|
1035 |
-
self.iou,
|
1036 |
-
self.classes,
|
1037 |
-
self.agnostic,
|
1038 |
-
self.multi_label,
|
1039 |
-
max_det=self.max_det,
|
1040 |
-
) # NMS
|
1041 |
-
for i in range(n):
|
1042 |
-
scale_boxes(shape1, y[i][:, :4], shape0[i])
|
1043 |
-
|
1044 |
-
return Detections(ims, y, files, dt, self.names, x.shape)
|
1045 |
-
|
1046 |
-
|
1047 |
-
class Detections:
|
1048 |
-
# YOLOv5 detections class for inference results
|
1049 |
-
def __init__(
|
1050 |
-
self, ims, pred, files, times=(0, 0, 0), names=None, shape=None
|
1051 |
-
):
|
1052 |
-
super().__init__()
|
1053 |
-
d = pred[0].device # device
|
1054 |
-
gn = [
|
1055 |
-
torch.tensor(
|
1056 |
-
[*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d
|
1057 |
-
)
|
1058 |
-
for im in ims
|
1059 |
-
] # normalizations
|
1060 |
-
self.ims = ims # list of images as numpy arrays
|
1061 |
-
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
|
1062 |
-
self.names = names # class names
|
1063 |
-
self.files = files # image filenames
|
1064 |
-
self.times = times # profiling times
|
1065 |
-
self.xyxy = pred # xyxy pixels
|
1066 |
-
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
|
1067 |
-
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
|
1068 |
-
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
|
1069 |
-
self.n = len(self.pred) # number of images (batch size)
|
1070 |
-
self.t = tuple(x.t / self.n * 1e3 for x in times) # timestamps (ms)
|
1071 |
-
self.s = tuple(shape) # inference BCHW shape
|
1072 |
-
|
1073 |
-
def _run(
|
1074 |
-
self,
|
1075 |
-
pprint=False,
|
1076 |
-
show=False,
|
1077 |
-
save=False,
|
1078 |
-
crop=False,
|
1079 |
-
render=False,
|
1080 |
-
labels=True,
|
1081 |
-
save_dir=Path(""),
|
1082 |
-
):
|
1083 |
-
s, crops = "", []
|
1084 |
-
for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
|
1085 |
-
s += f"\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} " # string
|
1086 |
-
if pred.shape[0]:
|
1087 |
-
for c in pred[:, -1].unique():
|
1088 |
-
n = (pred[:, -1] == c).sum() # detections per class
|
1089 |
-
s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
|
1090 |
-
s = s.rstrip(", ")
|
1091 |
-
if show or save or render or crop:
|
1092 |
-
annotator = Annotator(im, example=str(self.names))
|
1093 |
-
for *box, conf, cls in reversed(
|
1094 |
-
pred
|
1095 |
-
): # xyxy, confidence, class
|
1096 |
-
label = f"{self.names[int(cls)]} {conf:.2f}"
|
1097 |
-
if crop:
|
1098 |
-
file = (
|
1099 |
-
save_dir
|
1100 |
-
/ "crops"
|
1101 |
-
/ self.names[int(cls)]
|
1102 |
-
/ self.files[i]
|
1103 |
-
if save
|
1104 |
-
else None
|
1105 |
-
)
|
1106 |
-
crops.append(
|
1107 |
-
{
|
1108 |
-
"box": box,
|
1109 |
-
"conf": conf,
|
1110 |
-
"cls": cls,
|
1111 |
-
"label": label,
|
1112 |
-
"im": save_one_box(
|
1113 |
-
box, im, file=file, save=save
|
1114 |
-
),
|
1115 |
-
}
|
1116 |
-
)
|
1117 |
-
else: # all others
|
1118 |
-
annotator.box_label(
|
1119 |
-
box, label if labels else "", color=colors(cls)
|
1120 |
-
)
|
1121 |
-
im = annotator.im
|
1122 |
-
else:
|
1123 |
-
s += "(no detections)"
|
1124 |
-
|
1125 |
-
im = (
|
1126 |
-
Image.fromarray(im.astype(np.uint8))
|
1127 |
-
if isinstance(im, np.ndarray)
|
1128 |
-
else im
|
1129 |
-
) # from np
|
1130 |
-
if show:
|
1131 |
-
display(im) if is_notebook() else im.show(self.files[i])
|
1132 |
-
if save:
|
1133 |
-
f = self.files[i]
|
1134 |
-
im.save(save_dir / f) # save
|
1135 |
-
if i == self.n - 1:
|
1136 |
-
LOGGER.info(
|
1137 |
-
f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}"
|
1138 |
-
)
|
1139 |
-
if render:
|
1140 |
-
self.ims[i] = np.asarray(im)
|
1141 |
-
if pprint:
|
1142 |
-
s = s.lstrip("\n")
|
1143 |
-
return (
|
1144 |
-
f"{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}"
|
1145 |
-
% self.t
|
1146 |
-
)
|
1147 |
-
if crop:
|
1148 |
-
if save:
|
1149 |
-
LOGGER.info(f"Saved results to {save_dir}\n")
|
1150 |
-
return crops
|
1151 |
-
|
1152 |
-
@TryExcept("Showing images is not supported in this environment")
|
1153 |
-
def show(self, labels=True):
|
1154 |
-
self._run(show=True, labels=labels) # show results
|
1155 |
-
|
1156 |
-
def save(self, labels=True, save_dir="runs/detect/exp", exist_ok=False):
|
1157 |
-
save_dir = increment_path(
|
1158 |
-
save_dir, exist_ok, mkdir=True
|
1159 |
-
) # increment save_dir
|
1160 |
-
self._run(save=True, labels=labels, save_dir=save_dir) # save results
|
1161 |
-
|
1162 |
-
def crop(self, save=True, save_dir="runs/detect/exp", exist_ok=False):
|
1163 |
-
save_dir = (
|
1164 |
-
increment_path(save_dir, exist_ok, mkdir=True) if save else None
|
1165 |
-
)
|
1166 |
-
return self._run(
|
1167 |
-
crop=True, save=save, save_dir=save_dir
|
1168 |
-
) # crop results
|
1169 |
-
|
1170 |
-
def render(self, labels=True):
|
1171 |
-
self._run(render=True, labels=labels) # render results
|
1172 |
-
return self.ims
|
1173 |
-
|
1174 |
-
def pandas(self):
|
1175 |
-
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
|
1176 |
-
new = copy(self) # return copy
|
1177 |
-
ca = (
|
1178 |
-
"xmin",
|
1179 |
-
"ymin",
|
1180 |
-
"xmax",
|
1181 |
-
"ymax",
|
1182 |
-
"confidence",
|
1183 |
-
"class",
|
1184 |
-
"name",
|
1185 |
-
) # xyxy columns
|
1186 |
-
cb = (
|
1187 |
-
"xcenter",
|
1188 |
-
"ycenter",
|
1189 |
-
"width",
|
1190 |
-
"height",
|
1191 |
-
"confidence",
|
1192 |
-
"class",
|
1193 |
-
"name",
|
1194 |
-
) # xywh columns
|
1195 |
-
for k, c in zip(["xyxy", "xyxyn", "xywh", "xywhn"], [ca, ca, cb, cb]):
|
1196 |
-
a = [
|
1197 |
-
[
|
1198 |
-
x[:5] + [int(x[5]), self.names[int(x[5])]]
|
1199 |
-
for x in x.tolist()
|
1200 |
-
]
|
1201 |
-
for x in getattr(self, k)
|
1202 |
-
] # update
|
1203 |
-
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
|
1204 |
-
return new
|
1205 |
-
|
1206 |
-
def tolist(self):
|
1207 |
-
# return a list of Detections objects, i.e. 'for result in results.tolist():'
|
1208 |
-
r = range(self.n) # iterable
|
1209 |
-
x = [
|
1210 |
-
Detections(
|
1211 |
-
[self.ims[i]],
|
1212 |
-
[self.pred[i]],
|
1213 |
-
[self.files[i]],
|
1214 |
-
self.times,
|
1215 |
-
self.names,
|
1216 |
-
self.s,
|
1217 |
-
)
|
1218 |
-
for i in r
|
1219 |
-
]
|
1220 |
-
# for d in x:
|
1221 |
-
# for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
|
1222 |
-
# setattr(d, k, getattr(d, k)[0]) # pop out of list
|
1223 |
-
return x
|
1224 |
-
|
1225 |
-
def print(self):
|
1226 |
-
LOGGER.info(self.__str__())
|
1227 |
-
|
1228 |
-
def __len__(self): # override len(results)
|
1229 |
-
return self.n
|
1230 |
-
|
1231 |
-
def __str__(self): # override print(results)
|
1232 |
-
return self._run(pprint=True) # print results
|
1233 |
-
|
1234 |
-
def __repr__(self):
|
1235 |
-
return f"YOLOv5 {self.__class__} instance\n" + self.__str__()
|
1236 |
-
|
1237 |
-
|
1238 |
-
class Proto(nn.Module):
|
1239 |
-
# YOLOv5 mask Proto module for segmentation models
|
1240 |
-
def __init__(
|
1241 |
-
self, c1, c_=256, c2=32
|
1242 |
-
): # ch_in, number of protos, number of masks
|
1243 |
-
super().__init__()
|
1244 |
-
self.cv1 = Conv(c1, c_, k=3)
|
1245 |
-
self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
|
1246 |
-
self.cv2 = Conv(c_, c_, k=3)
|
1247 |
-
self.cv3 = Conv(c_, c2)
|
1248 |
-
|
1249 |
-
def forward(self, x):
|
1250 |
-
return self.cv3(self.cv2(self.upsample(self.cv1(x))))
|
1251 |
-
|
1252 |
-
|
1253 |
-
class Classify(nn.Module):
|
1254 |
-
# YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
|
1255 |
-
def __init__(
|
1256 |
-
self, c1, c2, k=1, s=1, p=None, g=1
|
1257 |
-
): # ch_in, ch_out, kernel, stride, padding, groups
|
1258 |
-
super().__init__()
|
1259 |
-
c_ = 1280 # efficientnet_b0 size
|
1260 |
-
self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
|
1261 |
-
self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1)
|
1262 |
-
self.drop = nn.Dropout(p=0.0, inplace=True)
|
1263 |
-
self.linear = nn.Linear(c_, c2) # to x(b,c2)
|
1264 |
-
|
1265 |
-
def forward(self, x):
|
1266 |
-
if isinstance(x, list):
|
1267 |
-
x = torch.cat(x, 1)
|
1268 |
-
return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abhilashvj/planogram-compliance/utils/flask_rest_api/restapi.py
DELETED
@@ -1,61 +0,0 @@
|
|
1 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
2 |
-
"""
|
3 |
-
Run a Flask REST API exposing one or more YOLOv5s models
|
4 |
-
"""
|
5 |
-
|
6 |
-
import argparse
|
7 |
-
import io
|
8 |
-
|
9 |
-
import torch
|
10 |
-
from flask import Flask, request
|
11 |
-
from PIL import Image
|
12 |
-
|
13 |
-
app = Flask(__name__)
|
14 |
-
models = {}
|
15 |
-
|
16 |
-
DETECTION_URL = "/v1/object-detection/<model>"
|
17 |
-
|
18 |
-
|
19 |
-
@app.route(DETECTION_URL, methods=["POST"])
|
20 |
-
def predict(model):
|
21 |
-
if request.method != "POST":
|
22 |
-
return
|
23 |
-
|
24 |
-
if request.files.get("image"):
|
25 |
-
# Method 1
|
26 |
-
# with request.files["image"] as f:
|
27 |
-
# im = Image.open(io.BytesIO(f.read()))
|
28 |
-
|
29 |
-
# Method 2
|
30 |
-
im_file = request.files["image"]
|
31 |
-
im_bytes = im_file.read()
|
32 |
-
im = Image.open(io.BytesIO(im_bytes))
|
33 |
-
|
34 |
-
if model in models:
|
35 |
-
results = models[model](
|
36 |
-
im, size=640
|
37 |
-
) # reduce size=320 for faster inference
|
38 |
-
return results.pandas().xyxy[0].to_json(orient="records")
|
39 |
-
|
40 |
-
|
41 |
-
if __name__ == "__main__":
|
42 |
-
parser = argparse.ArgumentParser(
|
43 |
-
description="Flask API exposing YOLOv5 model"
|
44 |
-
)
|
45 |
-
parser.add_argument("--port", default=5000, type=int, help="port number")
|
46 |
-
parser.add_argument(
|
47 |
-
"--model",
|
48 |
-
nargs="+",
|
49 |
-
default=["yolov5s"],
|
50 |
-
help="model(s) to run, i.e. --model yolov5n yolov5s",
|
51 |
-
)
|
52 |
-
opt = parser.parse_args()
|
53 |
-
|
54 |
-
for m in opt.model:
|
55 |
-
models[m] = torch.hub.load(
|
56 |
-
"ultralytics/yolov5", m, force_reload=True, skip_validation=True
|
57 |
-
)
|
58 |
-
|
59 |
-
app.run(
|
60 |
-
host="0.0.0.0", port=opt.port
|
61 |
-
) # debug=True causes Restarting with stat
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/overview.md
DELETED
@@ -1,92 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# Schedulers
|
14 |
-
|
15 |
-
Diffusers contains multiple pre-built schedule functions for the diffusion process.
|
16 |
-
|
17 |
-
## What is a scheduler?
|
18 |
-
|
19 |
-
The schedule functions, denoted *Schedulers* in the library take in the output of a trained model, a sample which the diffusion process is iterating on, and a timestep to return a denoised sample. That's why schedulers may also be called *Samplers* in other diffusion models implementations.
|
20 |
-
|
21 |
-
- Schedulers define the methodology for iteratively adding noise to an image or for updating a sample based on model outputs.
|
22 |
-
- adding noise in different manners represent the algorithmic processes to train a diffusion model by adding noise to images.
|
23 |
-
- for inference, the scheduler defines how to update a sample based on an output from a pretrained model.
|
24 |
-
- Schedulers are often defined by a *noise schedule* and an *update rule* to solve the differential equation solution.
|
25 |
-
|
26 |
-
### Discrete versus continuous schedulers
|
27 |
-
|
28 |
-
All schedulers take in a timestep to predict the updated version of the sample being diffused.
|
29 |
-
The timesteps dictate where in the diffusion process the step is, where data is generated by iterating forward in time and inference is executed by propagating backwards through timesteps.
|
30 |
-
Different algorithms use timesteps that can be discrete (accepting `int` inputs), such as the [`DDPMScheduler`] or [`PNDMScheduler`], or continuous (accepting `float` inputs), such as the score-based schedulers [`ScoreSdeVeScheduler`] or [`ScoreSdeVpScheduler`].
|
31 |
-
|
32 |
-
## Designing Re-usable schedulers
|
33 |
-
|
34 |
-
The core design principle between the schedule functions is to be model, system, and framework independent.
|
35 |
-
This allows for rapid experimentation and cleaner abstractions in the code, where the model prediction is separated from the sample update.
|
36 |
-
To this end, the design of schedulers is such that:
|
37 |
-
|
38 |
-
- Schedulers can be used interchangeably between diffusion models in inference to find the preferred trade-off between speed and generation quality.
|
39 |
-
- Schedulers are currently by default in PyTorch, but are designed to be framework independent (partial Jax support currently exists).
|
40 |
-
- Many diffusion pipelines, such as [`StableDiffusionPipeline`] and [`DiTPipeline`] can use any of [`KarrasDiffusionSchedulers`]
|
41 |
-
|
42 |
-
## Schedulers Summary
|
43 |
-
|
44 |
-
The following table summarizes all officially supported schedulers, their corresponding paper
|
45 |
-
|
46 |
-
| Scheduler | Paper |
|
47 |
-
|---|---|
|
48 |
-
| [ddim](./ddim) | [**Denoising Diffusion Implicit Models**](https://arxiv.org/abs/2010.02502) |
|
49 |
-
| [ddim_inverse](./ddim_inverse) | [**Denoising Diffusion Implicit Models**](https://arxiv.org/abs/2010.02502) |
|
50 |
-
| [ddpm](./ddpm) | [**Denoising Diffusion Probabilistic Models**](https://arxiv.org/abs/2006.11239) |
|
51 |
-
| [deis](./deis) | [**DEISMultistepScheduler**](https://arxiv.org/abs/2204.13902) |
|
52 |
-
| [singlestep_dpm_solver](./singlestep_dpm_solver) | [**Singlestep DPM-Solver**](https://arxiv.org/abs/2206.00927) |
|
53 |
-
| [multistep_dpm_solver](./multistep_dpm_solver) | [**Multistep DPM-Solver**](https://arxiv.org/abs/2206.00927) |
|
54 |
-
| [heun](./heun) | [**Heun scheduler inspired by Karras et. al paper**](https://arxiv.org/abs/2206.00364) |
|
55 |
-
| [dpm_discrete](./dpm_discrete) | [**DPM Discrete Scheduler inspired by Karras et. al paper**](https://arxiv.org/abs/2206.00364) |
|
56 |
-
| [dpm_discrete_ancestral](./dpm_discrete_ancestral) | [**DPM Discrete Scheduler with ancestral sampling inspired by Karras et. al paper**](https://arxiv.org/abs/2206.00364) |
|
57 |
-
| [stochastic_karras_ve](./stochastic_karras_ve) | [**Variance exploding, stochastic sampling from Karras et. al**](https://arxiv.org/abs/2206.00364) |
|
58 |
-
| [lms_discrete](./lms_discrete) | [**Linear multistep scheduler for discrete beta schedules**](https://arxiv.org/abs/2206.00364) |
|
59 |
-
| [pndm](./pndm) | [**Pseudo numerical methods for diffusion models (PNDM)**](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181) |
|
60 |
-
| [score_sde_ve](./score_sde_ve) | [**variance exploding stochastic differential equation (VE-SDE) scheduler**](https://arxiv.org/abs/2011.13456) |
|
61 |
-
| [ipndm](./ipndm) | [**improved pseudo numerical methods for diffusion models (iPNDM)**](https://github.com/crowsonkb/v-diffusion-pytorch/blob/987f8985e38208345c1959b0ea767a625831cc9b/diffusion/sampling.py#L296) |
|
62 |
-
| [score_sde_vp](./score_sde_vp) | [**Variance preserving stochastic differential equation (VP-SDE) scheduler**](https://arxiv.org/abs/2011.13456) |
|
63 |
-
| [euler](./euler) | [**Euler scheduler**](https://arxiv.org/abs/2206.00364) |
|
64 |
-
| [euler_ancestral](./euler_ancestral) | [**Euler Ancestral scheduler**](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L72) |
|
65 |
-
| [vq_diffusion](./vq_diffusion) | [**VQDiffusionScheduler**](https://arxiv.org/abs/2111.14822) |
|
66 |
-
| [unipc](./unipc) | [**UniPCMultistepScheduler**](https://arxiv.org/abs/2302.04867) |
|
67 |
-
| [repaint](./repaint) | [**RePaint scheduler**](https://arxiv.org/abs/2201.09865) |
|
68 |
-
|
69 |
-
## API
|
70 |
-
|
71 |
-
The core API for any new scheduler must follow a limited structure.
|
72 |
-
- Schedulers should provide one or more `def step(...)` functions that should be called to update the generated sample iteratively.
|
73 |
-
- Schedulers should provide a `set_timesteps(...)` method that configures the parameters of a schedule function for a specific inference task.
|
74 |
-
- Schedulers should be framework-specific.
|
75 |
-
|
76 |
-
The base class [`SchedulerMixin`] implements low level utilities used by multiple schedulers.
|
77 |
-
|
78 |
-
### SchedulerMixin
|
79 |
-
[[autodoc]] SchedulerMixin
|
80 |
-
|
81 |
-
### SchedulerOutput
|
82 |
-
The class [`SchedulerOutput`] contains the outputs from any schedulers `step(...)` call.
|
83 |
-
|
84 |
-
[[autodoc]] schedulers.scheduling_utils.SchedulerOutput
|
85 |
-
|
86 |
-
### KarrasDiffusionSchedulers
|
87 |
-
|
88 |
-
`KarrasDiffusionSchedulers` encompasses the main generalization of schedulers in Diffusers. The schedulers in this class are distinguished, at a high level, by their noise sampling strategy; the type of network and scaling; and finally the training strategy or how the loss is weighed.
|
89 |
-
|
90 |
-
The different schedulers, depending on the type of ODE solver, fall into the above taxonomy and provide a good abstraction for the design of the main schedulers implemented in Diffusers. The schedulers in this class are given below:
|
91 |
-
|
92 |
-
[[autodoc]] schedulers.scheduling_utils.KarrasDiffusionSchedulers
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/_base_/models/faster_rcnn_r50_caffe_c4.py
DELETED
@@ -1,112 +0,0 @@
|
|
1 |
-
# model settings
|
2 |
-
norm_cfg = dict(type='BN', requires_grad=False)
|
3 |
-
model = dict(
|
4 |
-
type='FasterRCNN',
|
5 |
-
pretrained='open-mmlab://detectron2/resnet50_caffe',
|
6 |
-
backbone=dict(
|
7 |
-
type='ResNet',
|
8 |
-
depth=50,
|
9 |
-
num_stages=3,
|
10 |
-
strides=(1, 2, 2),
|
11 |
-
dilations=(1, 1, 1),
|
12 |
-
out_indices=(2, ),
|
13 |
-
frozen_stages=1,
|
14 |
-
norm_cfg=norm_cfg,
|
15 |
-
norm_eval=True,
|
16 |
-
style='caffe'),
|
17 |
-
rpn_head=dict(
|
18 |
-
type='RPNHead',
|
19 |
-
in_channels=1024,
|
20 |
-
feat_channels=1024,
|
21 |
-
anchor_generator=dict(
|
22 |
-
type='AnchorGenerator',
|
23 |
-
scales=[2, 4, 8, 16, 32],
|
24 |
-
ratios=[0.5, 1.0, 2.0],
|
25 |
-
strides=[16]),
|
26 |
-
bbox_coder=dict(
|
27 |
-
type='DeltaXYWHBBoxCoder',
|
28 |
-
target_means=[.0, .0, .0, .0],
|
29 |
-
target_stds=[1.0, 1.0, 1.0, 1.0]),
|
30 |
-
loss_cls=dict(
|
31 |
-
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
|
32 |
-
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
|
33 |
-
roi_head=dict(
|
34 |
-
type='StandardRoIHead',
|
35 |
-
shared_head=dict(
|
36 |
-
type='ResLayer',
|
37 |
-
depth=50,
|
38 |
-
stage=3,
|
39 |
-
stride=2,
|
40 |
-
dilation=1,
|
41 |
-
style='caffe',
|
42 |
-
norm_cfg=norm_cfg,
|
43 |
-
norm_eval=True),
|
44 |
-
bbox_roi_extractor=dict(
|
45 |
-
type='SingleRoIExtractor',
|
46 |
-
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
|
47 |
-
out_channels=1024,
|
48 |
-
featmap_strides=[16]),
|
49 |
-
bbox_head=dict(
|
50 |
-
type='BBoxHead',
|
51 |
-
with_avg_pool=True,
|
52 |
-
roi_feat_size=7,
|
53 |
-
in_channels=2048,
|
54 |
-
num_classes=80,
|
55 |
-
bbox_coder=dict(
|
56 |
-
type='DeltaXYWHBBoxCoder',
|
57 |
-
target_means=[0., 0., 0., 0.],
|
58 |
-
target_stds=[0.1, 0.1, 0.2, 0.2]),
|
59 |
-
reg_class_agnostic=False,
|
60 |
-
loss_cls=dict(
|
61 |
-
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
|
62 |
-
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
|
63 |
-
# model training and testing settings
|
64 |
-
train_cfg=dict(
|
65 |
-
rpn=dict(
|
66 |
-
assigner=dict(
|
67 |
-
type='MaxIoUAssigner',
|
68 |
-
pos_iou_thr=0.7,
|
69 |
-
neg_iou_thr=0.3,
|
70 |
-
min_pos_iou=0.3,
|
71 |
-
match_low_quality=True,
|
72 |
-
ignore_iof_thr=-1),
|
73 |
-
sampler=dict(
|
74 |
-
type='RandomSampler',
|
75 |
-
num=256,
|
76 |
-
pos_fraction=0.5,
|
77 |
-
neg_pos_ub=-1,
|
78 |
-
add_gt_as_proposals=False),
|
79 |
-
allowed_border=0,
|
80 |
-
pos_weight=-1,
|
81 |
-
debug=False),
|
82 |
-
rpn_proposal=dict(
|
83 |
-
nms_pre=12000,
|
84 |
-
max_per_img=2000,
|
85 |
-
nms=dict(type='nms', iou_threshold=0.7),
|
86 |
-
min_bbox_size=0),
|
87 |
-
rcnn=dict(
|
88 |
-
assigner=dict(
|
89 |
-
type='MaxIoUAssigner',
|
90 |
-
pos_iou_thr=0.5,
|
91 |
-
neg_iou_thr=0.5,
|
92 |
-
min_pos_iou=0.5,
|
93 |
-
match_low_quality=False,
|
94 |
-
ignore_iof_thr=-1),
|
95 |
-
sampler=dict(
|
96 |
-
type='RandomSampler',
|
97 |
-
num=512,
|
98 |
-
pos_fraction=0.25,
|
99 |
-
neg_pos_ub=-1,
|
100 |
-
add_gt_as_proposals=True),
|
101 |
-
pos_weight=-1,
|
102 |
-
debug=False)),
|
103 |
-
test_cfg=dict(
|
104 |
-
rpn=dict(
|
105 |
-
nms_pre=6000,
|
106 |
-
max_per_img=1000,
|
107 |
-
nms=dict(type='nms', iou_threshold=0.7),
|
108 |
-
min_bbox_size=0),
|
109 |
-
rcnn=dict(
|
110 |
-
score_thr=0.05,
|
111 |
-
nms=dict(type='nms', iou_threshold=0.5),
|
112 |
-
max_per_img=100)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/data_processor.py
DELETED
@@ -1,209 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
This module is responsible for processing the corpus and feeding it into chromaDB. It will receive a corpus of text.
|
3 |
-
It will then split it into chunks of specified length. For each of those chunks, it will append surrounding context.
|
4 |
-
It will only include full words.
|
5 |
-
"""
|
6 |
-
|
7 |
-
import re
|
8 |
-
import bisect
|
9 |
-
|
10 |
-
import extensions.superboogav2.parameters as parameters
|
11 |
-
|
12 |
-
from .data_preprocessor import TextPreprocessorBuilder, TextSummarizer
|
13 |
-
from .chromadb import ChromaCollector
|
14 |
-
|
15 |
-
def preprocess_text_no_summary(text) -> str:
|
16 |
-
builder = TextPreprocessorBuilder(text)
|
17 |
-
if parameters.should_to_lower():
|
18 |
-
builder.to_lower()
|
19 |
-
|
20 |
-
if parameters.should_remove_punctuation():
|
21 |
-
builder.remove_punctuation()
|
22 |
-
|
23 |
-
if parameters.should_remove_specific_pos():
|
24 |
-
builder.remove_specific_pos()
|
25 |
-
|
26 |
-
if parameters.should_remove_stopwords():
|
27 |
-
builder.remove_stopwords
|
28 |
-
|
29 |
-
if parameters.should_lemmatize():
|
30 |
-
builder.lemmatize()
|
31 |
-
|
32 |
-
if parameters.should_merge_spaces():
|
33 |
-
builder.merge_spaces
|
34 |
-
|
35 |
-
if parameters.should_strip():
|
36 |
-
builder.strip()
|
37 |
-
|
38 |
-
if parameters.get_num_conversion_strategy():
|
39 |
-
if parameters.get_num_conversion_strategy() == parameters.NUM_TO_WORD_METHOD:
|
40 |
-
builder.num_to_word(parameters.get_min_num_length())
|
41 |
-
elif parameters.get_num_conversion_strategy() == parameters.NUM_TO_CHAR_METHOD:
|
42 |
-
builder.num_to_char(parameters.get_min_num_length())
|
43 |
-
elif parameters.get_num_conversion_strategy() == parameters.NUM_TO_CHAR_LONG_METHOD:
|
44 |
-
builder.num_to_char_long(parameters.get_min_num_length())
|
45 |
-
|
46 |
-
return builder.build()
|
47 |
-
|
48 |
-
|
49 |
-
def preprocess_text(text) -> list[str]:
|
50 |
-
important_sentences = TextSummarizer.process_long_text(text, parameters.get_min_num_sentences())
|
51 |
-
return [preprocess_text_no_summary(sent) for sent in important_sentences]
|
52 |
-
|
53 |
-
|
54 |
-
def _create_chunks_with_context(corpus, chunk_len, context_left, context_right):
|
55 |
-
"""
|
56 |
-
This function takes a corpus of text and splits it into chunks of a specified length,
|
57 |
-
then adds a specified amount of context to each chunk. The context is added by first
|
58 |
-
going backwards from the start of the chunk and then going forwards from the end of the
|
59 |
-
chunk, ensuring that the context includes only whole words and that the total context length
|
60 |
-
does not exceed the specified limit. This function uses binary search for efficiency.
|
61 |
-
|
62 |
-
Returns:
|
63 |
-
chunks (list of str): The chunks of text.
|
64 |
-
chunks_with_context (list of str): The chunks of text with added context.
|
65 |
-
chunk_with_context_start_indices (list of int): The starting indices of each chunk with context in the corpus.
|
66 |
-
"""
|
67 |
-
words = re.split('(\\s+)', corpus)
|
68 |
-
word_start_indices = [0]
|
69 |
-
current_index = 0
|
70 |
-
|
71 |
-
for word in words:
|
72 |
-
current_index += len(word)
|
73 |
-
word_start_indices.append(current_index)
|
74 |
-
|
75 |
-
chunks, chunk_lengths, chunk_start_indices, chunk_with_context_start_indices = [], [], [], []
|
76 |
-
current_length = 0
|
77 |
-
current_index = 0
|
78 |
-
chunk = []
|
79 |
-
|
80 |
-
for word in words:
|
81 |
-
if current_length + len(word) > chunk_len:
|
82 |
-
chunks.append(''.join(chunk))
|
83 |
-
chunk_lengths.append(current_length)
|
84 |
-
chunk_start_indices.append(current_index - current_length)
|
85 |
-
chunk = [word]
|
86 |
-
current_length = len(word)
|
87 |
-
else:
|
88 |
-
chunk.append(word)
|
89 |
-
current_length += len(word)
|
90 |
-
current_index += len(word)
|
91 |
-
|
92 |
-
if chunk:
|
93 |
-
chunks.append(''.join(chunk))
|
94 |
-
chunk_lengths.append(current_length)
|
95 |
-
chunk_start_indices.append(current_index - current_length)
|
96 |
-
|
97 |
-
chunks_with_context = []
|
98 |
-
for start_index, chunk_length in zip(chunk_start_indices, chunk_lengths):
|
99 |
-
context_start_index = bisect.bisect_right(word_start_indices, start_index - context_left)
|
100 |
-
context_end_index = bisect.bisect_left(word_start_indices, start_index + chunk_length + context_right)
|
101 |
-
|
102 |
-
# Combine all the words in the context range (before, chunk, and after)
|
103 |
-
chunk_with_context = ''.join(words[context_start_index:context_end_index])
|
104 |
-
chunks_with_context.append(chunk_with_context)
|
105 |
-
|
106 |
-
# Determine the start index of the chunk with context
|
107 |
-
chunk_with_context_start_index = word_start_indices[context_start_index]
|
108 |
-
chunk_with_context_start_indices.append(chunk_with_context_start_index)
|
109 |
-
|
110 |
-
return chunks, chunks_with_context, chunk_with_context_start_indices
|
111 |
-
|
112 |
-
|
113 |
-
def _clear_chunks(data_chunks, data_chunks_with_context, data_chunk_starting_indices):
|
114 |
-
distinct_data_chunks = []
|
115 |
-
distinct_data_chunks_with_context = []
|
116 |
-
distinct_data_chunk_starting_indices = []
|
117 |
-
|
118 |
-
seen_chunks = dict()
|
119 |
-
|
120 |
-
for chunk, context, index in zip(data_chunks, data_chunks_with_context, data_chunk_starting_indices):
|
121 |
-
# Skip the chunk if it does not contain any alphanumeric characters
|
122 |
-
if not any(char.isalnum() for char in chunk):
|
123 |
-
continue
|
124 |
-
|
125 |
-
seen_chunk_start = seen_chunks.get(chunk)
|
126 |
-
if seen_chunk_start:
|
127 |
-
# If we've already seen this exact chunk, and the context around it it very close to the seen chunk, then skip it.
|
128 |
-
if abs(seen_chunk_start-index) < parameters.get_delta_start():
|
129 |
-
continue
|
130 |
-
|
131 |
-
distinct_data_chunks.append(chunk)
|
132 |
-
distinct_data_chunks_with_context.append(context)
|
133 |
-
distinct_data_chunk_starting_indices.append(index)
|
134 |
-
|
135 |
-
seen_chunks[chunk] = index
|
136 |
-
|
137 |
-
return distinct_data_chunks, distinct_data_chunks_with_context, distinct_data_chunk_starting_indices
|
138 |
-
|
139 |
-
|
140 |
-
def process_and_add_to_collector(corpus: str, collector: ChromaCollector, clear_collector_before_adding: bool, metadata: dict):
|
141 |
-
# Defining variables
|
142 |
-
chunk_lens = [int(len.strip()) for len in parameters.get_chunk_len().split(',')]
|
143 |
-
context_len = [int(len.strip()) for len in parameters.get_context_len().split(',')]
|
144 |
-
if len(context_len) >= 3:
|
145 |
-
raise f"Context len has too many values: {len(context_len)}"
|
146 |
-
if len(context_len) == 2:
|
147 |
-
context_left = context_len[0]
|
148 |
-
context_right = context_len[1]
|
149 |
-
else:
|
150 |
-
context_left = context_right = context_len[0]
|
151 |
-
|
152 |
-
data_chunks = []
|
153 |
-
data_chunks_with_context = []
|
154 |
-
data_chunk_starting_indices = []
|
155 |
-
|
156 |
-
# Handling chunk_regex
|
157 |
-
if parameters.get_chunk_regex():
|
158 |
-
if parameters.get_chunk_separator():
|
159 |
-
cumulative_length = 0 # This variable will store the length of the processed corpus
|
160 |
-
sections = corpus.split(parameters.get_chunk_separator())
|
161 |
-
for section in sections:
|
162 |
-
special_chunks = list(re.finditer(parameters.get_chunk_regex(), section))
|
163 |
-
for match in special_chunks:
|
164 |
-
chunk = match.group(0)
|
165 |
-
start_index = match.start()
|
166 |
-
end_index = start_index + len(chunk)
|
167 |
-
context = section[max(0, start_index - context_left):min(len(section), end_index + context_right)]
|
168 |
-
data_chunks.append(chunk)
|
169 |
-
data_chunks_with_context.append(context)
|
170 |
-
data_chunk_starting_indices.append(cumulative_length + max(0, start_index - context_left))
|
171 |
-
cumulative_length += len(section) + len(parameters.get_chunk_separator()) # Update the length of the processed corpus
|
172 |
-
else:
|
173 |
-
special_chunks = list(re.finditer(parameters.get_chunk_regex(), corpus))
|
174 |
-
for match in special_chunks:
|
175 |
-
chunk = match.group(0)
|
176 |
-
start_index = match.start()
|
177 |
-
end_index = start_index + len(chunk)
|
178 |
-
context = corpus[max(0, start_index - context_left):min(len(corpus), end_index + context_right)]
|
179 |
-
data_chunks.append(chunk)
|
180 |
-
data_chunks_with_context.append(context)
|
181 |
-
data_chunk_starting_indices.append(max(0, start_index - context_left))
|
182 |
-
|
183 |
-
for chunk_len in chunk_lens:
|
184 |
-
# Breaking the data into chunks and adding those to the db
|
185 |
-
if parameters.get_chunk_separator():
|
186 |
-
cumulative_length = 0 # This variable will store the length of the processed corpus
|
187 |
-
sections = corpus.split(parameters.get_chunk_separator())
|
188 |
-
for section in sections:
|
189 |
-
chunks, chunks_with_context, context_start_indices = _create_chunks_with_context(section, chunk_len, context_left, context_right)
|
190 |
-
context_start_indices = [cumulative_length + i for i in context_start_indices] # Add the length of the processed corpus to each start index
|
191 |
-
data_chunks.extend(chunks)
|
192 |
-
data_chunks_with_context.extend(chunks_with_context)
|
193 |
-
data_chunk_starting_indices.extend(context_start_indices)
|
194 |
-
cumulative_length += len(section) + len(parameters.get_chunk_separator()) # Update the length of the processed corpus
|
195 |
-
else:
|
196 |
-
chunks, chunks_with_context, context_start_indices = _create_chunks_with_context(corpus, chunk_len, context_left, context_right)
|
197 |
-
data_chunks.extend(chunks)
|
198 |
-
data_chunks_with_context.extend(chunks_with_context)
|
199 |
-
data_chunk_starting_indices.extend(context_start_indices)
|
200 |
-
|
201 |
-
data_chunks = [preprocess_text_no_summary(chunk) for chunk in data_chunks]
|
202 |
-
|
203 |
-
data_chunks, data_chunks_with_context, data_chunk_starting_indices = _clear_chunks(
|
204 |
-
data_chunks, data_chunks_with_context, data_chunk_starting_indices
|
205 |
-
)
|
206 |
-
|
207 |
-
if clear_collector_before_adding:
|
208 |
-
collector.clear()
|
209 |
-
collector.add(data_chunks, data_chunks_with_context, data_chunk_starting_indices, [metadata]*len(data_chunks) if metadata is not None else None)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Archan/ArXivAudio/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: ArXiv Audio
|
3 |
-
emoji: 🖨️
|
4 |
-
colorFrom: cyan
|
5 |
-
colorTo: red
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.10.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/filesystem.py
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
import fnmatch
|
2 |
-
import os
|
3 |
-
import os.path
|
4 |
-
import random
|
5 |
-
import sys
|
6 |
-
from contextlib import contextmanager
|
7 |
-
from tempfile import NamedTemporaryFile
|
8 |
-
from typing import Any, BinaryIO, Generator, List, Union, cast
|
9 |
-
|
10 |
-
from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
|
11 |
-
|
12 |
-
from pip._internal.utils.compat import get_path_uid
|
13 |
-
from pip._internal.utils.misc import format_size
|
14 |
-
|
15 |
-
|
16 |
-
def check_path_owner(path: str) -> bool:
|
17 |
-
# If we don't have a way to check the effective uid of this process, then
|
18 |
-
# we'll just assume that we own the directory.
|
19 |
-
if sys.platform == "win32" or not hasattr(os, "geteuid"):
|
20 |
-
return True
|
21 |
-
|
22 |
-
assert os.path.isabs(path)
|
23 |
-
|
24 |
-
previous = None
|
25 |
-
while path != previous:
|
26 |
-
if os.path.lexists(path):
|
27 |
-
# Check if path is writable by current user.
|
28 |
-
if os.geteuid() == 0:
|
29 |
-
# Special handling for root user in order to handle properly
|
30 |
-
# cases where users use sudo without -H flag.
|
31 |
-
try:
|
32 |
-
path_uid = get_path_uid(path)
|
33 |
-
except OSError:
|
34 |
-
return False
|
35 |
-
return path_uid == 0
|
36 |
-
else:
|
37 |
-
return os.access(path, os.W_OK)
|
38 |
-
else:
|
39 |
-
previous, path = path, os.path.dirname(path)
|
40 |
-
return False # assume we don't own the path
|
41 |
-
|
42 |
-
|
43 |
-
@contextmanager
|
44 |
-
def adjacent_tmp_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:
|
45 |
-
"""Return a file-like object pointing to a tmp file next to path.
|
46 |
-
|
47 |
-
The file is created securely and is ensured to be written to disk
|
48 |
-
after the context reaches its end.
|
49 |
-
|
50 |
-
kwargs will be passed to tempfile.NamedTemporaryFile to control
|
51 |
-
the way the temporary file will be opened.
|
52 |
-
"""
|
53 |
-
with NamedTemporaryFile(
|
54 |
-
delete=False,
|
55 |
-
dir=os.path.dirname(path),
|
56 |
-
prefix=os.path.basename(path),
|
57 |
-
suffix=".tmp",
|
58 |
-
**kwargs,
|
59 |
-
) as f:
|
60 |
-
result = cast(BinaryIO, f)
|
61 |
-
try:
|
62 |
-
yield result
|
63 |
-
finally:
|
64 |
-
result.flush()
|
65 |
-
os.fsync(result.fileno())
|
66 |
-
|
67 |
-
|
68 |
-
# Tenacity raises RetryError by default, explicitly raise the original exception
|
69 |
-
_replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25))
|
70 |
-
|
71 |
-
replace = _replace_retry(os.replace)
|
72 |
-
|
73 |
-
|
74 |
-
# test_writable_dir and _test_writable_dir_win are copied from Flit,
|
75 |
-
# with the author's agreement to also place them under pip's license.
|
76 |
-
def test_writable_dir(path: str) -> bool:
|
77 |
-
"""Check if a directory is writable.
|
78 |
-
|
79 |
-
Uses os.access() on POSIX, tries creating files on Windows.
|
80 |
-
"""
|
81 |
-
# If the directory doesn't exist, find the closest parent that does.
|
82 |
-
while not os.path.isdir(path):
|
83 |
-
parent = os.path.dirname(path)
|
84 |
-
if parent == path:
|
85 |
-
break # Should never get here, but infinite loops are bad
|
86 |
-
path = parent
|
87 |
-
|
88 |
-
if os.name == "posix":
|
89 |
-
return os.access(path, os.W_OK)
|
90 |
-
|
91 |
-
return _test_writable_dir_win(path)
|
92 |
-
|
93 |
-
|
94 |
-
def _test_writable_dir_win(path: str) -> bool:
|
95 |
-
# os.access doesn't work on Windows: http://bugs.python.org/issue2528
|
96 |
-
# and we can't use tempfile: http://bugs.python.org/issue22107
|
97 |
-
basename = "accesstest_deleteme_fishfingers_custard_"
|
98 |
-
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
|
99 |
-
for _ in range(10):
|
100 |
-
name = basename + "".join(random.choice(alphabet) for _ in range(6))
|
101 |
-
file = os.path.join(path, name)
|
102 |
-
try:
|
103 |
-
fd = os.open(file, os.O_RDWR | os.O_CREAT | os.O_EXCL)
|
104 |
-
except FileExistsError:
|
105 |
-
pass
|
106 |
-
except PermissionError:
|
107 |
-
# This could be because there's a directory with the same name.
|
108 |
-
# But it's highly unlikely there's a directory called that,
|
109 |
-
# so we'll assume it's because the parent dir is not writable.
|
110 |
-
# This could as well be because the parent dir is not readable,
|
111 |
-
# due to non-privileged user access.
|
112 |
-
return False
|
113 |
-
else:
|
114 |
-
os.close(fd)
|
115 |
-
os.unlink(file)
|
116 |
-
return True
|
117 |
-
|
118 |
-
# This should never be reached
|
119 |
-
raise OSError("Unexpected condition testing for writable directory")
|
120 |
-
|
121 |
-
|
122 |
-
def find_files(path: str, pattern: str) -> List[str]:
|
123 |
-
"""Returns a list of absolute paths of files beneath path, recursively,
|
124 |
-
with filenames which match the UNIX-style shell glob pattern."""
|
125 |
-
result: List[str] = []
|
126 |
-
for root, _, files in os.walk(path):
|
127 |
-
matches = fnmatch.filter(files, pattern)
|
128 |
-
result.extend(os.path.join(root, f) for f in matches)
|
129 |
-
return result
|
130 |
-
|
131 |
-
|
132 |
-
def file_size(path: str) -> Union[int, float]:
|
133 |
-
# If it's a symlink, return 0.
|
134 |
-
if os.path.islink(path):
|
135 |
-
return 0
|
136 |
-
return os.path.getsize(path)
|
137 |
-
|
138 |
-
|
139 |
-
def format_file_size(path: str) -> str:
|
140 |
-
return format_size(file_size(path))
|
141 |
-
|
142 |
-
|
143 |
-
def directory_size(path: str) -> Union[int, float]:
|
144 |
-
size = 0.0
|
145 |
-
for root, _dirs, files in os.walk(path):
|
146 |
-
for filename in files:
|
147 |
-
file_path = os.path.join(root, filename)
|
148 |
-
size += file_size(file_path)
|
149 |
-
return size
|
150 |
-
|
151 |
-
|
152 |
-
def format_directory_size(path: str) -> str:
|
153 |
-
return format_size(directory_size(path))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal.py
DELETED
@@ -1,127 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
pygments.formatters.terminal
|
3 |
-
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
4 |
-
|
5 |
-
Formatter for terminal output with ANSI sequences.
|
6 |
-
|
7 |
-
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
|
8 |
-
:license: BSD, see LICENSE for details.
|
9 |
-
"""
|
10 |
-
|
11 |
-
from pip._vendor.pygments.formatter import Formatter
|
12 |
-
from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
|
13 |
-
Number, Operator, Generic, Token, Whitespace
|
14 |
-
from pip._vendor.pygments.console import ansiformat
|
15 |
-
from pip._vendor.pygments.util import get_choice_opt
|
16 |
-
|
17 |
-
|
18 |
-
__all__ = ['TerminalFormatter']
|
19 |
-
|
20 |
-
|
21 |
-
#: Map token types to a tuple of color values for light and dark
|
22 |
-
#: backgrounds.
|
23 |
-
TERMINAL_COLORS = {
|
24 |
-
Token: ('', ''),
|
25 |
-
|
26 |
-
Whitespace: ('gray', 'brightblack'),
|
27 |
-
Comment: ('gray', 'brightblack'),
|
28 |
-
Comment.Preproc: ('cyan', 'brightcyan'),
|
29 |
-
Keyword: ('blue', 'brightblue'),
|
30 |
-
Keyword.Type: ('cyan', 'brightcyan'),
|
31 |
-
Operator.Word: ('magenta', 'brightmagenta'),
|
32 |
-
Name.Builtin: ('cyan', 'brightcyan'),
|
33 |
-
Name.Function: ('green', 'brightgreen'),
|
34 |
-
Name.Namespace: ('_cyan_', '_brightcyan_'),
|
35 |
-
Name.Class: ('_green_', '_brightgreen_'),
|
36 |
-
Name.Exception: ('cyan', 'brightcyan'),
|
37 |
-
Name.Decorator: ('brightblack', 'gray'),
|
38 |
-
Name.Variable: ('red', 'brightred'),
|
39 |
-
Name.Constant: ('red', 'brightred'),
|
40 |
-
Name.Attribute: ('cyan', 'brightcyan'),
|
41 |
-
Name.Tag: ('brightblue', 'brightblue'),
|
42 |
-
String: ('yellow', 'yellow'),
|
43 |
-
Number: ('blue', 'brightblue'),
|
44 |
-
|
45 |
-
Generic.Deleted: ('brightred', 'brightred'),
|
46 |
-
Generic.Inserted: ('green', 'brightgreen'),
|
47 |
-
Generic.Heading: ('**', '**'),
|
48 |
-
Generic.Subheading: ('*magenta*', '*brightmagenta*'),
|
49 |
-
Generic.Prompt: ('**', '**'),
|
50 |
-
Generic.Error: ('brightred', 'brightred'),
|
51 |
-
|
52 |
-
Error: ('_brightred_', '_brightred_'),
|
53 |
-
}
|
54 |
-
|
55 |
-
|
56 |
-
class TerminalFormatter(Formatter):
|
57 |
-
r"""
|
58 |
-
Format tokens with ANSI color sequences, for output in a text console.
|
59 |
-
Color sequences are terminated at newlines, so that paging the output
|
60 |
-
works correctly.
|
61 |
-
|
62 |
-
The `get_style_defs()` method doesn't do anything special since there is
|
63 |
-
no support for common styles.
|
64 |
-
|
65 |
-
Options accepted:
|
66 |
-
|
67 |
-
`bg`
|
68 |
-
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
|
69 |
-
(default: ``"light"``).
|
70 |
-
|
71 |
-
`colorscheme`
|
72 |
-
A dictionary mapping token types to (lightbg, darkbg) color names or
|
73 |
-
``None`` (default: ``None`` = use builtin colorscheme).
|
74 |
-
|
75 |
-
`linenos`
|
76 |
-
Set to ``True`` to have line numbers on the terminal output as well
|
77 |
-
(default: ``False`` = no line numbers).
|
78 |
-
"""
|
79 |
-
name = 'Terminal'
|
80 |
-
aliases = ['terminal', 'console']
|
81 |
-
filenames = []
|
82 |
-
|
83 |
-
def __init__(self, **options):
|
84 |
-
Formatter.__init__(self, **options)
|
85 |
-
self.darkbg = get_choice_opt(options, 'bg',
|
86 |
-
['light', 'dark'], 'light') == 'dark'
|
87 |
-
self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
|
88 |
-
self.linenos = options.get('linenos', False)
|
89 |
-
self._lineno = 0
|
90 |
-
|
91 |
-
def format(self, tokensource, outfile):
|
92 |
-
return Formatter.format(self, tokensource, outfile)
|
93 |
-
|
94 |
-
def _write_lineno(self, outfile):
|
95 |
-
self._lineno += 1
|
96 |
-
outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
|
97 |
-
|
98 |
-
def _get_color(self, ttype):
|
99 |
-
# self.colorscheme is a dict containing usually generic types, so we
|
100 |
-
# have to walk the tree of dots. The base Token type must be a key,
|
101 |
-
# even if it's empty string, as in the default above.
|
102 |
-
colors = self.colorscheme.get(ttype)
|
103 |
-
while colors is None:
|
104 |
-
ttype = ttype.parent
|
105 |
-
colors = self.colorscheme.get(ttype)
|
106 |
-
return colors[self.darkbg]
|
107 |
-
|
108 |
-
def format_unencoded(self, tokensource, outfile):
|
109 |
-
if self.linenos:
|
110 |
-
self._write_lineno(outfile)
|
111 |
-
|
112 |
-
for ttype, value in tokensource:
|
113 |
-
color = self._get_color(ttype)
|
114 |
-
|
115 |
-
for line in value.splitlines(True):
|
116 |
-
if color:
|
117 |
-
outfile.write(ansiformat(color, line.rstrip('\n')))
|
118 |
-
else:
|
119 |
-
outfile.write(line.rstrip('\n'))
|
120 |
-
if line.endswith('\n'):
|
121 |
-
if self.linenos:
|
122 |
-
self._write_lineno(outfile)
|
123 |
-
else:
|
124 |
-
outfile.write('\n')
|
125 |
-
|
126 |
-
if self.linenos:
|
127 |
-
outfile.write("\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/models.py
DELETED
@@ -1,1034 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
requests.models
|
3 |
-
~~~~~~~~~~~~~~~
|
4 |
-
|
5 |
-
This module contains the primary objects that power Requests.
|
6 |
-
"""
|
7 |
-
|
8 |
-
import datetime
|
9 |
-
|
10 |
-
# Import encoding now, to avoid implicit import later.
|
11 |
-
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
|
12 |
-
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
|
13 |
-
import encodings.idna # noqa: F401
|
14 |
-
from io import UnsupportedOperation
|
15 |
-
|
16 |
-
from pip._vendor.urllib3.exceptions import (
|
17 |
-
DecodeError,
|
18 |
-
LocationParseError,
|
19 |
-
ProtocolError,
|
20 |
-
ReadTimeoutError,
|
21 |
-
SSLError,
|
22 |
-
)
|
23 |
-
from pip._vendor.urllib3.fields import RequestField
|
24 |
-
from pip._vendor.urllib3.filepost import encode_multipart_formdata
|
25 |
-
from pip._vendor.urllib3.util import parse_url
|
26 |
-
|
27 |
-
from ._internal_utils import to_native_string, unicode_is_ascii
|
28 |
-
from .auth import HTTPBasicAuth
|
29 |
-
from .compat import (
|
30 |
-
Callable,
|
31 |
-
JSONDecodeError,
|
32 |
-
Mapping,
|
33 |
-
basestring,
|
34 |
-
builtin_str,
|
35 |
-
chardet,
|
36 |
-
cookielib,
|
37 |
-
)
|
38 |
-
from .compat import json as complexjson
|
39 |
-
from .compat import urlencode, urlsplit, urlunparse
|
40 |
-
from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header
|
41 |
-
from .exceptions import (
|
42 |
-
ChunkedEncodingError,
|
43 |
-
ConnectionError,
|
44 |
-
ContentDecodingError,
|
45 |
-
HTTPError,
|
46 |
-
InvalidJSONError,
|
47 |
-
InvalidURL,
|
48 |
-
)
|
49 |
-
from .exceptions import JSONDecodeError as RequestsJSONDecodeError
|
50 |
-
from .exceptions import MissingSchema
|
51 |
-
from .exceptions import SSLError as RequestsSSLError
|
52 |
-
from .exceptions import StreamConsumedError
|
53 |
-
from .hooks import default_hooks
|
54 |
-
from .status_codes import codes
|
55 |
-
from .structures import CaseInsensitiveDict
|
56 |
-
from .utils import (
|
57 |
-
check_header_validity,
|
58 |
-
get_auth_from_url,
|
59 |
-
guess_filename,
|
60 |
-
guess_json_utf,
|
61 |
-
iter_slices,
|
62 |
-
parse_header_links,
|
63 |
-
requote_uri,
|
64 |
-
stream_decode_response_unicode,
|
65 |
-
super_len,
|
66 |
-
to_key_val_list,
|
67 |
-
)
|
68 |
-
|
69 |
-
#: The set of HTTP status codes that indicate an automatically
|
70 |
-
#: processable redirect.
|
71 |
-
REDIRECT_STATI = (
|
72 |
-
codes.moved, # 301
|
73 |
-
codes.found, # 302
|
74 |
-
codes.other, # 303
|
75 |
-
codes.temporary_redirect, # 307
|
76 |
-
codes.permanent_redirect, # 308
|
77 |
-
)
|
78 |
-
|
79 |
-
DEFAULT_REDIRECT_LIMIT = 30
|
80 |
-
CONTENT_CHUNK_SIZE = 10 * 1024
|
81 |
-
ITER_CHUNK_SIZE = 512
|
82 |
-
|
83 |
-
|
84 |
-
class RequestEncodingMixin:
|
85 |
-
@property
|
86 |
-
def path_url(self):
|
87 |
-
"""Build the path URL to use."""
|
88 |
-
|
89 |
-
url = []
|
90 |
-
|
91 |
-
p = urlsplit(self.url)
|
92 |
-
|
93 |
-
path = p.path
|
94 |
-
if not path:
|
95 |
-
path = "/"
|
96 |
-
|
97 |
-
url.append(path)
|
98 |
-
|
99 |
-
query = p.query
|
100 |
-
if query:
|
101 |
-
url.append("?")
|
102 |
-
url.append(query)
|
103 |
-
|
104 |
-
return "".join(url)
|
105 |
-
|
106 |
-
@staticmethod
|
107 |
-
def _encode_params(data):
|
108 |
-
"""Encode parameters in a piece of data.
|
109 |
-
|
110 |
-
Will successfully encode parameters when passed as a dict or a list of
|
111 |
-
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
|
112 |
-
if parameters are supplied as a dict.
|
113 |
-
"""
|
114 |
-
|
115 |
-
if isinstance(data, (str, bytes)):
|
116 |
-
return data
|
117 |
-
elif hasattr(data, "read"):
|
118 |
-
return data
|
119 |
-
elif hasattr(data, "__iter__"):
|
120 |
-
result = []
|
121 |
-
for k, vs in to_key_val_list(data):
|
122 |
-
if isinstance(vs, basestring) or not hasattr(vs, "__iter__"):
|
123 |
-
vs = [vs]
|
124 |
-
for v in vs:
|
125 |
-
if v is not None:
|
126 |
-
result.append(
|
127 |
-
(
|
128 |
-
k.encode("utf-8") if isinstance(k, str) else k,
|
129 |
-
v.encode("utf-8") if isinstance(v, str) else v,
|
130 |
-
)
|
131 |
-
)
|
132 |
-
return urlencode(result, doseq=True)
|
133 |
-
else:
|
134 |
-
return data
|
135 |
-
|
136 |
-
@staticmethod
|
137 |
-
def _encode_files(files, data):
|
138 |
-
"""Build the body for a multipart/form-data request.
|
139 |
-
|
140 |
-
Will successfully encode files when passed as a dict or a list of
|
141 |
-
tuples. Order is retained if data is a list of tuples but arbitrary
|
142 |
-
if parameters are supplied as a dict.
|
143 |
-
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
|
144 |
-
or 4-tuples (filename, fileobj, contentype, custom_headers).
|
145 |
-
"""
|
146 |
-
if not files:
|
147 |
-
raise ValueError("Files must be provided.")
|
148 |
-
elif isinstance(data, basestring):
|
149 |
-
raise ValueError("Data must not be a string.")
|
150 |
-
|
151 |
-
new_fields = []
|
152 |
-
fields = to_key_val_list(data or {})
|
153 |
-
files = to_key_val_list(files or {})
|
154 |
-
|
155 |
-
for field, val in fields:
|
156 |
-
if isinstance(val, basestring) or not hasattr(val, "__iter__"):
|
157 |
-
val = [val]
|
158 |
-
for v in val:
|
159 |
-
if v is not None:
|
160 |
-
# Don't call str() on bytestrings: in Py3 it all goes wrong.
|
161 |
-
if not isinstance(v, bytes):
|
162 |
-
v = str(v)
|
163 |
-
|
164 |
-
new_fields.append(
|
165 |
-
(
|
166 |
-
field.decode("utf-8")
|
167 |
-
if isinstance(field, bytes)
|
168 |
-
else field,
|
169 |
-
v.encode("utf-8") if isinstance(v, str) else v,
|
170 |
-
)
|
171 |
-
)
|
172 |
-
|
173 |
-
for (k, v) in files:
|
174 |
-
# support for explicit filename
|
175 |
-
ft = None
|
176 |
-
fh = None
|
177 |
-
if isinstance(v, (tuple, list)):
|
178 |
-
if len(v) == 2:
|
179 |
-
fn, fp = v
|
180 |
-
elif len(v) == 3:
|
181 |
-
fn, fp, ft = v
|
182 |
-
else:
|
183 |
-
fn, fp, ft, fh = v
|
184 |
-
else:
|
185 |
-
fn = guess_filename(v) or k
|
186 |
-
fp = v
|
187 |
-
|
188 |
-
if isinstance(fp, (str, bytes, bytearray)):
|
189 |
-
fdata = fp
|
190 |
-
elif hasattr(fp, "read"):
|
191 |
-
fdata = fp.read()
|
192 |
-
elif fp is None:
|
193 |
-
continue
|
194 |
-
else:
|
195 |
-
fdata = fp
|
196 |
-
|
197 |
-
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
|
198 |
-
rf.make_multipart(content_type=ft)
|
199 |
-
new_fields.append(rf)
|
200 |
-
|
201 |
-
body, content_type = encode_multipart_formdata(new_fields)
|
202 |
-
|
203 |
-
return body, content_type
|
204 |
-
|
205 |
-
|
206 |
-
class RequestHooksMixin:
|
207 |
-
def register_hook(self, event, hook):
|
208 |
-
"""Properly register a hook."""
|
209 |
-
|
210 |
-
if event not in self.hooks:
|
211 |
-
raise ValueError(f'Unsupported event specified, with event name "{event}"')
|
212 |
-
|
213 |
-
if isinstance(hook, Callable):
|
214 |
-
self.hooks[event].append(hook)
|
215 |
-
elif hasattr(hook, "__iter__"):
|
216 |
-
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
|
217 |
-
|
218 |
-
def deregister_hook(self, event, hook):
|
219 |
-
"""Deregister a previously registered hook.
|
220 |
-
Returns True if the hook existed, False if not.
|
221 |
-
"""
|
222 |
-
|
223 |
-
try:
|
224 |
-
self.hooks[event].remove(hook)
|
225 |
-
return True
|
226 |
-
except ValueError:
|
227 |
-
return False
|
228 |
-
|
229 |
-
|
230 |
-
class Request(RequestHooksMixin):
|
231 |
-
"""A user-created :class:`Request <Request>` object.
|
232 |
-
|
233 |
-
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
|
234 |
-
|
235 |
-
:param method: HTTP method to use.
|
236 |
-
:param url: URL to send.
|
237 |
-
:param headers: dictionary of headers to send.
|
238 |
-
:param files: dictionary of {filename: fileobject} files to multipart upload.
|
239 |
-
:param data: the body to attach to the request. If a dictionary or
|
240 |
-
list of tuples ``[(key, value)]`` is provided, form-encoding will
|
241 |
-
take place.
|
242 |
-
:param json: json for the body to attach to the request (if files or data is not specified).
|
243 |
-
:param params: URL parameters to append to the URL. If a dictionary or
|
244 |
-
list of tuples ``[(key, value)]`` is provided, form-encoding will
|
245 |
-
take place.
|
246 |
-
:param auth: Auth handler or (user, pass) tuple.
|
247 |
-
:param cookies: dictionary or CookieJar of cookies to attach to this request.
|
248 |
-
:param hooks: dictionary of callback hooks, for internal usage.
|
249 |
-
|
250 |
-
Usage::
|
251 |
-
|
252 |
-
>>> import requests
|
253 |
-
>>> req = requests.Request('GET', 'https://httpbin.org/get')
|
254 |
-
>>> req.prepare()
|
255 |
-
<PreparedRequest [GET]>
|
256 |
-
"""
|
257 |
-
|
258 |
-
def __init__(
|
259 |
-
self,
|
260 |
-
method=None,
|
261 |
-
url=None,
|
262 |
-
headers=None,
|
263 |
-
files=None,
|
264 |
-
data=None,
|
265 |
-
params=None,
|
266 |
-
auth=None,
|
267 |
-
cookies=None,
|
268 |
-
hooks=None,
|
269 |
-
json=None,
|
270 |
-
):
|
271 |
-
|
272 |
-
# Default empty dicts for dict params.
|
273 |
-
data = [] if data is None else data
|
274 |
-
files = [] if files is None else files
|
275 |
-
headers = {} if headers is None else headers
|
276 |
-
params = {} if params is None else params
|
277 |
-
hooks = {} if hooks is None else hooks
|
278 |
-
|
279 |
-
self.hooks = default_hooks()
|
280 |
-
for (k, v) in list(hooks.items()):
|
281 |
-
self.register_hook(event=k, hook=v)
|
282 |
-
|
283 |
-
self.method = method
|
284 |
-
self.url = url
|
285 |
-
self.headers = headers
|
286 |
-
self.files = files
|
287 |
-
self.data = data
|
288 |
-
self.json = json
|
289 |
-
self.params = params
|
290 |
-
self.auth = auth
|
291 |
-
self.cookies = cookies
|
292 |
-
|
293 |
-
def __repr__(self):
|
294 |
-
return f"<Request [{self.method}]>"
|
295 |
-
|
296 |
-
def prepare(self):
|
297 |
-
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
|
298 |
-
p = PreparedRequest()
|
299 |
-
p.prepare(
|
300 |
-
method=self.method,
|
301 |
-
url=self.url,
|
302 |
-
headers=self.headers,
|
303 |
-
files=self.files,
|
304 |
-
data=self.data,
|
305 |
-
json=self.json,
|
306 |
-
params=self.params,
|
307 |
-
auth=self.auth,
|
308 |
-
cookies=self.cookies,
|
309 |
-
hooks=self.hooks,
|
310 |
-
)
|
311 |
-
return p
|
312 |
-
|
313 |
-
|
314 |
-
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
|
315 |
-
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
|
316 |
-
containing the exact bytes that will be sent to the server.
|
317 |
-
|
318 |
-
Instances are generated from a :class:`Request <Request>` object, and
|
319 |
-
should not be instantiated manually; doing so may produce undesirable
|
320 |
-
effects.
|
321 |
-
|
322 |
-
Usage::
|
323 |
-
|
324 |
-
>>> import requests
|
325 |
-
>>> req = requests.Request('GET', 'https://httpbin.org/get')
|
326 |
-
>>> r = req.prepare()
|
327 |
-
>>> r
|
328 |
-
<PreparedRequest [GET]>
|
329 |
-
|
330 |
-
>>> s = requests.Session()
|
331 |
-
>>> s.send(r)
|
332 |
-
<Response [200]>
|
333 |
-
"""
|
334 |
-
|
335 |
-
def __init__(self):
|
336 |
-
#: HTTP verb to send to the server.
|
337 |
-
self.method = None
|
338 |
-
#: HTTP URL to send the request to.
|
339 |
-
self.url = None
|
340 |
-
#: dictionary of HTTP headers.
|
341 |
-
self.headers = None
|
342 |
-
# The `CookieJar` used to create the Cookie header will be stored here
|
343 |
-
# after prepare_cookies is called
|
344 |
-
self._cookies = None
|
345 |
-
#: request body to send to the server.
|
346 |
-
self.body = None
|
347 |
-
#: dictionary of callback hooks, for internal usage.
|
348 |
-
self.hooks = default_hooks()
|
349 |
-
#: integer denoting starting position of a readable file-like body.
|
350 |
-
self._body_position = None
|
351 |
-
|
352 |
-
def prepare(
|
353 |
-
self,
|
354 |
-
method=None,
|
355 |
-
url=None,
|
356 |
-
headers=None,
|
357 |
-
files=None,
|
358 |
-
data=None,
|
359 |
-
params=None,
|
360 |
-
auth=None,
|
361 |
-
cookies=None,
|
362 |
-
hooks=None,
|
363 |
-
json=None,
|
364 |
-
):
|
365 |
-
"""Prepares the entire request with the given parameters."""
|
366 |
-
|
367 |
-
self.prepare_method(method)
|
368 |
-
self.prepare_url(url, params)
|
369 |
-
self.prepare_headers(headers)
|
370 |
-
self.prepare_cookies(cookies)
|
371 |
-
self.prepare_body(data, files, json)
|
372 |
-
self.prepare_auth(auth, url)
|
373 |
-
|
374 |
-
# Note that prepare_auth must be last to enable authentication schemes
|
375 |
-
# such as OAuth to work on a fully prepared request.
|
376 |
-
|
377 |
-
# This MUST go after prepare_auth. Authenticators could add a hook
|
378 |
-
self.prepare_hooks(hooks)
|
379 |
-
|
380 |
-
def __repr__(self):
|
381 |
-
return f"<PreparedRequest [{self.method}]>"
|
382 |
-
|
383 |
-
def copy(self):
|
384 |
-
p = PreparedRequest()
|
385 |
-
p.method = self.method
|
386 |
-
p.url = self.url
|
387 |
-
p.headers = self.headers.copy() if self.headers is not None else None
|
388 |
-
p._cookies = _copy_cookie_jar(self._cookies)
|
389 |
-
p.body = self.body
|
390 |
-
p.hooks = self.hooks
|
391 |
-
p._body_position = self._body_position
|
392 |
-
return p
|
393 |
-
|
394 |
-
def prepare_method(self, method):
|
395 |
-
"""Prepares the given HTTP method."""
|
396 |
-
self.method = method
|
397 |
-
if self.method is not None:
|
398 |
-
self.method = to_native_string(self.method.upper())
|
399 |
-
|
400 |
-
@staticmethod
|
401 |
-
def _get_idna_encoded_host(host):
|
402 |
-
from pip._vendor import idna
|
403 |
-
|
404 |
-
try:
|
405 |
-
host = idna.encode(host, uts46=True).decode("utf-8")
|
406 |
-
except idna.IDNAError:
|
407 |
-
raise UnicodeError
|
408 |
-
return host
|
409 |
-
|
410 |
-
def prepare_url(self, url, params):
|
411 |
-
"""Prepares the given HTTP URL."""
|
412 |
-
#: Accept objects that have string representations.
|
413 |
-
#: We're unable to blindly call unicode/str functions
|
414 |
-
#: as this will include the bytestring indicator (b'')
|
415 |
-
#: on python 3.x.
|
416 |
-
#: https://github.com/psf/requests/pull/2238
|
417 |
-
if isinstance(url, bytes):
|
418 |
-
url = url.decode("utf8")
|
419 |
-
else:
|
420 |
-
url = str(url)
|
421 |
-
|
422 |
-
# Remove leading whitespaces from url
|
423 |
-
url = url.lstrip()
|
424 |
-
|
425 |
-
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
|
426 |
-
# `data` etc to work around exceptions from `url_parse`, which
|
427 |
-
# handles RFC 3986 only.
|
428 |
-
if ":" in url and not url.lower().startswith("http"):
|
429 |
-
self.url = url
|
430 |
-
return
|
431 |
-
|
432 |
-
# Support for unicode domain names and paths.
|
433 |
-
try:
|
434 |
-
scheme, auth, host, port, path, query, fragment = parse_url(url)
|
435 |
-
except LocationParseError as e:
|
436 |
-
raise InvalidURL(*e.args)
|
437 |
-
|
438 |
-
if not scheme:
|
439 |
-
raise MissingSchema(
|
440 |
-
f"Invalid URL {url!r}: No scheme supplied. "
|
441 |
-
f"Perhaps you meant https://{url}?"
|
442 |
-
)
|
443 |
-
|
444 |
-
if not host:
|
445 |
-
raise InvalidURL(f"Invalid URL {url!r}: No host supplied")
|
446 |
-
|
447 |
-
# In general, we want to try IDNA encoding the hostname if the string contains
|
448 |
-
# non-ASCII characters. This allows users to automatically get the correct IDNA
|
449 |
-
# behaviour. For strings containing only ASCII characters, we need to also verify
|
450 |
-
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
|
451 |
-
if not unicode_is_ascii(host):
|
452 |
-
try:
|
453 |
-
host = self._get_idna_encoded_host(host)
|
454 |
-
except UnicodeError:
|
455 |
-
raise InvalidURL("URL has an invalid label.")
|
456 |
-
elif host.startswith(("*", ".")):
|
457 |
-
raise InvalidURL("URL has an invalid label.")
|
458 |
-
|
459 |
-
# Carefully reconstruct the network location
|
460 |
-
netloc = auth or ""
|
461 |
-
if netloc:
|
462 |
-
netloc += "@"
|
463 |
-
netloc += host
|
464 |
-
if port:
|
465 |
-
netloc += f":{port}"
|
466 |
-
|
467 |
-
# Bare domains aren't valid URLs.
|
468 |
-
if not path:
|
469 |
-
path = "/"
|
470 |
-
|
471 |
-
if isinstance(params, (str, bytes)):
|
472 |
-
params = to_native_string(params)
|
473 |
-
|
474 |
-
enc_params = self._encode_params(params)
|
475 |
-
if enc_params:
|
476 |
-
if query:
|
477 |
-
query = f"{query}&{enc_params}"
|
478 |
-
else:
|
479 |
-
query = enc_params
|
480 |
-
|
481 |
-
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
|
482 |
-
self.url = url
|
483 |
-
|
484 |
-
def prepare_headers(self, headers):
|
485 |
-
"""Prepares the given HTTP headers."""
|
486 |
-
|
487 |
-
self.headers = CaseInsensitiveDict()
|
488 |
-
if headers:
|
489 |
-
for header in headers.items():
|
490 |
-
# Raise exception on invalid header value.
|
491 |
-
check_header_validity(header)
|
492 |
-
name, value = header
|
493 |
-
self.headers[to_native_string(name)] = value
|
494 |
-
|
495 |
-
def prepare_body(self, data, files, json=None):
|
496 |
-
"""Prepares the given HTTP body data."""
|
497 |
-
|
498 |
-
# Check if file, fo, generator, iterator.
|
499 |
-
# If not, run through normal process.
|
500 |
-
|
501 |
-
# Nottin' on you.
|
502 |
-
body = None
|
503 |
-
content_type = None
|
504 |
-
|
505 |
-
if not data and json is not None:
|
506 |
-
# urllib3 requires a bytes-like body. Python 2's json.dumps
|
507 |
-
# provides this natively, but Python 3 gives a Unicode string.
|
508 |
-
content_type = "application/json"
|
509 |
-
|
510 |
-
try:
|
511 |
-
body = complexjson.dumps(json, allow_nan=False)
|
512 |
-
except ValueError as ve:
|
513 |
-
raise InvalidJSONError(ve, request=self)
|
514 |
-
|
515 |
-
if not isinstance(body, bytes):
|
516 |
-
body = body.encode("utf-8")
|
517 |
-
|
518 |
-
is_stream = all(
|
519 |
-
[
|
520 |
-
hasattr(data, "__iter__"),
|
521 |
-
not isinstance(data, (basestring, list, tuple, Mapping)),
|
522 |
-
]
|
523 |
-
)
|
524 |
-
|
525 |
-
if is_stream:
|
526 |
-
try:
|
527 |
-
length = super_len(data)
|
528 |
-
except (TypeError, AttributeError, UnsupportedOperation):
|
529 |
-
length = None
|
530 |
-
|
531 |
-
body = data
|
532 |
-
|
533 |
-
if getattr(body, "tell", None) is not None:
|
534 |
-
# Record the current file position before reading.
|
535 |
-
# This will allow us to rewind a file in the event
|
536 |
-
# of a redirect.
|
537 |
-
try:
|
538 |
-
self._body_position = body.tell()
|
539 |
-
except OSError:
|
540 |
-
# This differentiates from None, allowing us to catch
|
541 |
-
# a failed `tell()` later when trying to rewind the body
|
542 |
-
self._body_position = object()
|
543 |
-
|
544 |
-
if files:
|
545 |
-
raise NotImplementedError(
|
546 |
-
"Streamed bodies and files are mutually exclusive."
|
547 |
-
)
|
548 |
-
|
549 |
-
if length:
|
550 |
-
self.headers["Content-Length"] = builtin_str(length)
|
551 |
-
else:
|
552 |
-
self.headers["Transfer-Encoding"] = "chunked"
|
553 |
-
else:
|
554 |
-
# Multi-part file uploads.
|
555 |
-
if files:
|
556 |
-
(body, content_type) = self._encode_files(files, data)
|
557 |
-
else:
|
558 |
-
if data:
|
559 |
-
body = self._encode_params(data)
|
560 |
-
if isinstance(data, basestring) or hasattr(data, "read"):
|
561 |
-
content_type = None
|
562 |
-
else:
|
563 |
-
content_type = "application/x-www-form-urlencoded"
|
564 |
-
|
565 |
-
self.prepare_content_length(body)
|
566 |
-
|
567 |
-
# Add content-type if it wasn't explicitly provided.
|
568 |
-
if content_type and ("content-type" not in self.headers):
|
569 |
-
self.headers["Content-Type"] = content_type
|
570 |
-
|
571 |
-
self.body = body
|
572 |
-
|
573 |
-
def prepare_content_length(self, body):
|
574 |
-
"""Prepare Content-Length header based on request method and body"""
|
575 |
-
if body is not None:
|
576 |
-
length = super_len(body)
|
577 |
-
if length:
|
578 |
-
# If length exists, set it. Otherwise, we fallback
|
579 |
-
# to Transfer-Encoding: chunked.
|
580 |
-
self.headers["Content-Length"] = builtin_str(length)
|
581 |
-
elif (
|
582 |
-
self.method not in ("GET", "HEAD")
|
583 |
-
and self.headers.get("Content-Length") is None
|
584 |
-
):
|
585 |
-
# Set Content-Length to 0 for methods that can have a body
|
586 |
-
# but don't provide one. (i.e. not GET or HEAD)
|
587 |
-
self.headers["Content-Length"] = "0"
|
588 |
-
|
589 |
-
def prepare_auth(self, auth, url=""):
|
590 |
-
"""Prepares the given HTTP auth data."""
|
591 |
-
|
592 |
-
# If no Auth is explicitly provided, extract it from the URL first.
|
593 |
-
if auth is None:
|
594 |
-
url_auth = get_auth_from_url(self.url)
|
595 |
-
auth = url_auth if any(url_auth) else None
|
596 |
-
|
597 |
-
if auth:
|
598 |
-
if isinstance(auth, tuple) and len(auth) == 2:
|
599 |
-
# special-case basic HTTP auth
|
600 |
-
auth = HTTPBasicAuth(*auth)
|
601 |
-
|
602 |
-
# Allow auth to make its changes.
|
603 |
-
r = auth(self)
|
604 |
-
|
605 |
-
# Update self to reflect the auth changes.
|
606 |
-
self.__dict__.update(r.__dict__)
|
607 |
-
|
608 |
-
# Recompute Content-Length
|
609 |
-
self.prepare_content_length(self.body)
|
610 |
-
|
611 |
-
def prepare_cookies(self, cookies):
|
612 |
-
"""Prepares the given HTTP cookie data.
|
613 |
-
|
614 |
-
This function eventually generates a ``Cookie`` header from the
|
615 |
-
given cookies using cookielib. Due to cookielib's design, the header
|
616 |
-
will not be regenerated if it already exists, meaning this function
|
617 |
-
can only be called once for the life of the
|
618 |
-
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
|
619 |
-
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
|
620 |
-
header is removed beforehand.
|
621 |
-
"""
|
622 |
-
if isinstance(cookies, cookielib.CookieJar):
|
623 |
-
self._cookies = cookies
|
624 |
-
else:
|
625 |
-
self._cookies = cookiejar_from_dict(cookies)
|
626 |
-
|
627 |
-
cookie_header = get_cookie_header(self._cookies, self)
|
628 |
-
if cookie_header is not None:
|
629 |
-
self.headers["Cookie"] = cookie_header
|
630 |
-
|
631 |
-
def prepare_hooks(self, hooks):
|
632 |
-
"""Prepares the given hooks."""
|
633 |
-
# hooks can be passed as None to the prepare method and to this
|
634 |
-
# method. To prevent iterating over None, simply use an empty list
|
635 |
-
# if hooks is False-y
|
636 |
-
hooks = hooks or []
|
637 |
-
for event in hooks:
|
638 |
-
self.register_hook(event, hooks[event])
|
639 |
-
|
640 |
-
|
641 |
-
class Response:
|
642 |
-
"""The :class:`Response <Response>` object, which contains a
|
643 |
-
server's response to an HTTP request.
|
644 |
-
"""
|
645 |
-
|
646 |
-
__attrs__ = [
|
647 |
-
"_content",
|
648 |
-
"status_code",
|
649 |
-
"headers",
|
650 |
-
"url",
|
651 |
-
"history",
|
652 |
-
"encoding",
|
653 |
-
"reason",
|
654 |
-
"cookies",
|
655 |
-
"elapsed",
|
656 |
-
"request",
|
657 |
-
]
|
658 |
-
|
659 |
-
def __init__(self):
|
660 |
-
self._content = False
|
661 |
-
self._content_consumed = False
|
662 |
-
self._next = None
|
663 |
-
|
664 |
-
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
|
665 |
-
self.status_code = None
|
666 |
-
|
667 |
-
#: Case-insensitive Dictionary of Response Headers.
|
668 |
-
#: For example, ``headers['content-encoding']`` will return the
|
669 |
-
#: value of a ``'Content-Encoding'`` response header.
|
670 |
-
self.headers = CaseInsensitiveDict()
|
671 |
-
|
672 |
-
#: File-like object representation of response (for advanced usage).
|
673 |
-
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
|
674 |
-
#: This requirement does not apply for use internally to Requests.
|
675 |
-
self.raw = None
|
676 |
-
|
677 |
-
#: Final URL location of Response.
|
678 |
-
self.url = None
|
679 |
-
|
680 |
-
#: Encoding to decode with when accessing r.text.
|
681 |
-
self.encoding = None
|
682 |
-
|
683 |
-
#: A list of :class:`Response <Response>` objects from
|
684 |
-
#: the history of the Request. Any redirect responses will end
|
685 |
-
#: up here. The list is sorted from the oldest to the most recent request.
|
686 |
-
self.history = []
|
687 |
-
|
688 |
-
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
|
689 |
-
self.reason = None
|
690 |
-
|
691 |
-
#: A CookieJar of Cookies the server sent back.
|
692 |
-
self.cookies = cookiejar_from_dict({})
|
693 |
-
|
694 |
-
#: The amount of time elapsed between sending the request
|
695 |
-
#: and the arrival of the response (as a timedelta).
|
696 |
-
#: This property specifically measures the time taken between sending
|
697 |
-
#: the first byte of the request and finishing parsing the headers. It
|
698 |
-
#: is therefore unaffected by consuming the response content or the
|
699 |
-
#: value of the ``stream`` keyword argument.
|
700 |
-
self.elapsed = datetime.timedelta(0)
|
701 |
-
|
702 |
-
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
|
703 |
-
#: is a response.
|
704 |
-
self.request = None
|
705 |
-
|
706 |
-
def __enter__(self):
|
707 |
-
return self
|
708 |
-
|
709 |
-
def __exit__(self, *args):
|
710 |
-
self.close()
|
711 |
-
|
712 |
-
def __getstate__(self):
|
713 |
-
# Consume everything; accessing the content attribute makes
|
714 |
-
# sure the content has been fully read.
|
715 |
-
if not self._content_consumed:
|
716 |
-
self.content
|
717 |
-
|
718 |
-
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
|
719 |
-
|
720 |
-
def __setstate__(self, state):
|
721 |
-
for name, value in state.items():
|
722 |
-
setattr(self, name, value)
|
723 |
-
|
724 |
-
# pickled objects do not have .raw
|
725 |
-
setattr(self, "_content_consumed", True)
|
726 |
-
setattr(self, "raw", None)
|
727 |
-
|
728 |
-
def __repr__(self):
|
729 |
-
return f"<Response [{self.status_code}]>"
|
730 |
-
|
731 |
-
def __bool__(self):
|
732 |
-
"""Returns True if :attr:`status_code` is less than 400.
|
733 |
-
|
734 |
-
This attribute checks if the status code of the response is between
|
735 |
-
400 and 600 to see if there was a client error or a server error. If
|
736 |
-
the status code, is between 200 and 400, this will return True. This
|
737 |
-
is **not** a check to see if the response code is ``200 OK``.
|
738 |
-
"""
|
739 |
-
return self.ok
|
740 |
-
|
741 |
-
def __nonzero__(self):
|
742 |
-
"""Returns True if :attr:`status_code` is less than 400.
|
743 |
-
|
744 |
-
This attribute checks if the status code of the response is between
|
745 |
-
400 and 600 to see if there was a client error or a server error. If
|
746 |
-
the status code, is between 200 and 400, this will return True. This
|
747 |
-
is **not** a check to see if the response code is ``200 OK``.
|
748 |
-
"""
|
749 |
-
return self.ok
|
750 |
-
|
751 |
-
def __iter__(self):
|
752 |
-
"""Allows you to use a response as an iterator."""
|
753 |
-
return self.iter_content(128)
|
754 |
-
|
755 |
-
@property
|
756 |
-
def ok(self):
|
757 |
-
"""Returns True if :attr:`status_code` is less than 400, False if not.
|
758 |
-
|
759 |
-
This attribute checks if the status code of the response is between
|
760 |
-
400 and 600 to see if there was a client error or a server error. If
|
761 |
-
the status code is between 200 and 400, this will return True. This
|
762 |
-
is **not** a check to see if the response code is ``200 OK``.
|
763 |
-
"""
|
764 |
-
try:
|
765 |
-
self.raise_for_status()
|
766 |
-
except HTTPError:
|
767 |
-
return False
|
768 |
-
return True
|
769 |
-
|
770 |
-
@property
|
771 |
-
def is_redirect(self):
|
772 |
-
"""True if this Response is a well-formed HTTP redirect that could have
|
773 |
-
been processed automatically (by :meth:`Session.resolve_redirects`).
|
774 |
-
"""
|
775 |
-
return "location" in self.headers and self.status_code in REDIRECT_STATI
|
776 |
-
|
777 |
-
@property
|
778 |
-
def is_permanent_redirect(self):
|
779 |
-
"""True if this Response one of the permanent versions of redirect."""
|
780 |
-
return "location" in self.headers and self.status_code in (
|
781 |
-
codes.moved_permanently,
|
782 |
-
codes.permanent_redirect,
|
783 |
-
)
|
784 |
-
|
785 |
-
@property
|
786 |
-
def next(self):
|
787 |
-
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
|
788 |
-
return self._next
|
789 |
-
|
790 |
-
@property
|
791 |
-
def apparent_encoding(self):
|
792 |
-
"""The apparent encoding, provided by the charset_normalizer or chardet libraries."""
|
793 |
-
return chardet.detect(self.content)["encoding"]
|
794 |
-
|
795 |
-
def iter_content(self, chunk_size=1, decode_unicode=False):
|
796 |
-
"""Iterates over the response data. When stream=True is set on the
|
797 |
-
request, this avoids reading the content at once into memory for
|
798 |
-
large responses. The chunk size is the number of bytes it should
|
799 |
-
read into memory. This is not necessarily the length of each item
|
800 |
-
returned as decoding can take place.
|
801 |
-
|
802 |
-
chunk_size must be of type int or None. A value of None will
|
803 |
-
function differently depending on the value of `stream`.
|
804 |
-
stream=True will read data as it arrives in whatever size the
|
805 |
-
chunks are received. If stream=False, data is returned as
|
806 |
-
a single chunk.
|
807 |
-
|
808 |
-
If decode_unicode is True, content will be decoded using the best
|
809 |
-
available encoding based on the response.
|
810 |
-
"""
|
811 |
-
|
812 |
-
def generate():
|
813 |
-
# Special case for urllib3.
|
814 |
-
if hasattr(self.raw, "stream"):
|
815 |
-
try:
|
816 |
-
yield from self.raw.stream(chunk_size, decode_content=True)
|
817 |
-
except ProtocolError as e:
|
818 |
-
raise ChunkedEncodingError(e)
|
819 |
-
except DecodeError as e:
|
820 |
-
raise ContentDecodingError(e)
|
821 |
-
except ReadTimeoutError as e:
|
822 |
-
raise ConnectionError(e)
|
823 |
-
except SSLError as e:
|
824 |
-
raise RequestsSSLError(e)
|
825 |
-
else:
|
826 |
-
# Standard file-like object.
|
827 |
-
while True:
|
828 |
-
chunk = self.raw.read(chunk_size)
|
829 |
-
if not chunk:
|
830 |
-
break
|
831 |
-
yield chunk
|
832 |
-
|
833 |
-
self._content_consumed = True
|
834 |
-
|
835 |
-
if self._content_consumed and isinstance(self._content, bool):
|
836 |
-
raise StreamConsumedError()
|
837 |
-
elif chunk_size is not None and not isinstance(chunk_size, int):
|
838 |
-
raise TypeError(
|
839 |
-
f"chunk_size must be an int, it is instead a {type(chunk_size)}."
|
840 |
-
)
|
841 |
-
# simulate reading small chunks of the content
|
842 |
-
reused_chunks = iter_slices(self._content, chunk_size)
|
843 |
-
|
844 |
-
stream_chunks = generate()
|
845 |
-
|
846 |
-
chunks = reused_chunks if self._content_consumed else stream_chunks
|
847 |
-
|
848 |
-
if decode_unicode:
|
849 |
-
chunks = stream_decode_response_unicode(chunks, self)
|
850 |
-
|
851 |
-
return chunks
|
852 |
-
|
853 |
-
def iter_lines(
|
854 |
-
self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None
|
855 |
-
):
|
856 |
-
"""Iterates over the response data, one line at a time. When
|
857 |
-
stream=True is set on the request, this avoids reading the
|
858 |
-
content at once into memory for large responses.
|
859 |
-
|
860 |
-
.. note:: This method is not reentrant safe.
|
861 |
-
"""
|
862 |
-
|
863 |
-
pending = None
|
864 |
-
|
865 |
-
for chunk in self.iter_content(
|
866 |
-
chunk_size=chunk_size, decode_unicode=decode_unicode
|
867 |
-
):
|
868 |
-
|
869 |
-
if pending is not None:
|
870 |
-
chunk = pending + chunk
|
871 |
-
|
872 |
-
if delimiter:
|
873 |
-
lines = chunk.split(delimiter)
|
874 |
-
else:
|
875 |
-
lines = chunk.splitlines()
|
876 |
-
|
877 |
-
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
|
878 |
-
pending = lines.pop()
|
879 |
-
else:
|
880 |
-
pending = None
|
881 |
-
|
882 |
-
yield from lines
|
883 |
-
|
884 |
-
if pending is not None:
|
885 |
-
yield pending
|
886 |
-
|
887 |
-
@property
|
888 |
-
def content(self):
|
889 |
-
"""Content of the response, in bytes."""
|
890 |
-
|
891 |
-
if self._content is False:
|
892 |
-
# Read the contents.
|
893 |
-
if self._content_consumed:
|
894 |
-
raise RuntimeError("The content for this response was already consumed")
|
895 |
-
|
896 |
-
if self.status_code == 0 or self.raw is None:
|
897 |
-
self._content = None
|
898 |
-
else:
|
899 |
-
self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b""
|
900 |
-
|
901 |
-
self._content_consumed = True
|
902 |
-
# don't need to release the connection; that's been handled by urllib3
|
903 |
-
# since we exhausted the data.
|
904 |
-
return self._content
|
905 |
-
|
906 |
-
@property
|
907 |
-
def text(self):
|
908 |
-
"""Content of the response, in unicode.
|
909 |
-
|
910 |
-
If Response.encoding is None, encoding will be guessed using
|
911 |
-
``charset_normalizer`` or ``chardet``.
|
912 |
-
|
913 |
-
The encoding of the response content is determined based solely on HTTP
|
914 |
-
headers, following RFC 2616 to the letter. If you can take advantage of
|
915 |
-
non-HTTP knowledge to make a better guess at the encoding, you should
|
916 |
-
set ``r.encoding`` appropriately before accessing this property.
|
917 |
-
"""
|
918 |
-
|
919 |
-
# Try charset from content-type
|
920 |
-
content = None
|
921 |
-
encoding = self.encoding
|
922 |
-
|
923 |
-
if not self.content:
|
924 |
-
return ""
|
925 |
-
|
926 |
-
# Fallback to auto-detected encoding.
|
927 |
-
if self.encoding is None:
|
928 |
-
encoding = self.apparent_encoding
|
929 |
-
|
930 |
-
# Decode unicode from given encoding.
|
931 |
-
try:
|
932 |
-
content = str(self.content, encoding, errors="replace")
|
933 |
-
except (LookupError, TypeError):
|
934 |
-
# A LookupError is raised if the encoding was not found which could
|
935 |
-
# indicate a misspelling or similar mistake.
|
936 |
-
#
|
937 |
-
# A TypeError can be raised if encoding is None
|
938 |
-
#
|
939 |
-
# So we try blindly encoding.
|
940 |
-
content = str(self.content, errors="replace")
|
941 |
-
|
942 |
-
return content
|
943 |
-
|
944 |
-
def json(self, **kwargs):
|
945 |
-
r"""Returns the json-encoded content of a response, if any.
|
946 |
-
|
947 |
-
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
|
948 |
-
:raises requests.exceptions.JSONDecodeError: If the response body does not
|
949 |
-
contain valid json.
|
950 |
-
"""
|
951 |
-
|
952 |
-
if not self.encoding and self.content and len(self.content) > 3:
|
953 |
-
# No encoding set. JSON RFC 4627 section 3 states we should expect
|
954 |
-
# UTF-8, -16 or -32. Detect which one to use; If the detection or
|
955 |
-
# decoding fails, fall back to `self.text` (using charset_normalizer to make
|
956 |
-
# a best guess).
|
957 |
-
encoding = guess_json_utf(self.content)
|
958 |
-
if encoding is not None:
|
959 |
-
try:
|
960 |
-
return complexjson.loads(self.content.decode(encoding), **kwargs)
|
961 |
-
except UnicodeDecodeError:
|
962 |
-
# Wrong UTF codec detected; usually because it's not UTF-8
|
963 |
-
# but some other 8-bit codec. This is an RFC violation,
|
964 |
-
# and the server didn't bother to tell us what codec *was*
|
965 |
-
# used.
|
966 |
-
pass
|
967 |
-
except JSONDecodeError as e:
|
968 |
-
raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
|
969 |
-
|
970 |
-
try:
|
971 |
-
return complexjson.loads(self.text, **kwargs)
|
972 |
-
except JSONDecodeError as e:
|
973 |
-
# Catch JSON-related errors and raise as requests.JSONDecodeError
|
974 |
-
# This aliases json.JSONDecodeError and simplejson.JSONDecodeError
|
975 |
-
raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
|
976 |
-
|
977 |
-
@property
|
978 |
-
def links(self):
|
979 |
-
"""Returns the parsed header links of the response, if any."""
|
980 |
-
|
981 |
-
header = self.headers.get("link")
|
982 |
-
|
983 |
-
resolved_links = {}
|
984 |
-
|
985 |
-
if header:
|
986 |
-
links = parse_header_links(header)
|
987 |
-
|
988 |
-
for link in links:
|
989 |
-
key = link.get("rel") or link.get("url")
|
990 |
-
resolved_links[key] = link
|
991 |
-
|
992 |
-
return resolved_links
|
993 |
-
|
994 |
-
def raise_for_status(self):
|
995 |
-
"""Raises :class:`HTTPError`, if one occurred."""
|
996 |
-
|
997 |
-
http_error_msg = ""
|
998 |
-
if isinstance(self.reason, bytes):
|
999 |
-
# We attempt to decode utf-8 first because some servers
|
1000 |
-
# choose to localize their reason strings. If the string
|
1001 |
-
# isn't utf-8, we fall back to iso-8859-1 for all other
|
1002 |
-
# encodings. (See PR #3538)
|
1003 |
-
try:
|
1004 |
-
reason = self.reason.decode("utf-8")
|
1005 |
-
except UnicodeDecodeError:
|
1006 |
-
reason = self.reason.decode("iso-8859-1")
|
1007 |
-
else:
|
1008 |
-
reason = self.reason
|
1009 |
-
|
1010 |
-
if 400 <= self.status_code < 500:
|
1011 |
-
http_error_msg = (
|
1012 |
-
f"{self.status_code} Client Error: {reason} for url: {self.url}"
|
1013 |
-
)
|
1014 |
-
|
1015 |
-
elif 500 <= self.status_code < 600:
|
1016 |
-
http_error_msg = (
|
1017 |
-
f"{self.status_code} Server Error: {reason} for url: {self.url}"
|
1018 |
-
)
|
1019 |
-
|
1020 |
-
if http_error_msg:
|
1021 |
-
raise HTTPError(http_error_msg, response=self)
|
1022 |
-
|
1023 |
-
def close(self):
|
1024 |
-
"""Releases the connection back to the pool. Once this method has been
|
1025 |
-
called the underlying ``raw`` object must not be accessed again.
|
1026 |
-
|
1027 |
-
*Note: Should not normally need to be called explicitly.*
|
1028 |
-
"""
|
1029 |
-
if not self._content_consumed:
|
1030 |
-
self.raw.close()
|
1031 |
-
|
1032 |
-
release_conn = getattr(self.raw, "release_conn", None)
|
1033 |
-
if release_conn is not None:
|
1034 |
-
release_conn()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/retry.py
DELETED
@@ -1,272 +0,0 @@
|
|
1 |
-
# Copyright 2016–2021 Julien Danjou
|
2 |
-
# Copyright 2016 Joshua Harlow
|
3 |
-
# Copyright 2013-2014 Ray Holder
|
4 |
-
#
|
5 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
# you may not use this file except in compliance with the License.
|
7 |
-
# You may obtain a copy of the License at
|
8 |
-
#
|
9 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
#
|
11 |
-
# Unless required by applicable law or agreed to in writing, software
|
12 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
# See the License for the specific language governing permissions and
|
15 |
-
# limitations under the License.
|
16 |
-
|
17 |
-
import abc
|
18 |
-
import re
|
19 |
-
import typing
|
20 |
-
|
21 |
-
if typing.TYPE_CHECKING:
|
22 |
-
from pip._vendor.tenacity import RetryCallState
|
23 |
-
|
24 |
-
|
25 |
-
class retry_base(abc.ABC):
|
26 |
-
"""Abstract base class for retry strategies."""
|
27 |
-
|
28 |
-
@abc.abstractmethod
|
29 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
30 |
-
pass
|
31 |
-
|
32 |
-
def __and__(self, other: "retry_base") -> "retry_all":
|
33 |
-
return retry_all(self, other)
|
34 |
-
|
35 |
-
def __or__(self, other: "retry_base") -> "retry_any":
|
36 |
-
return retry_any(self, other)
|
37 |
-
|
38 |
-
|
39 |
-
RetryBaseT = typing.Union[retry_base, typing.Callable[["RetryCallState"], bool]]
|
40 |
-
|
41 |
-
|
42 |
-
class _retry_never(retry_base):
|
43 |
-
"""Retry strategy that never rejects any result."""
|
44 |
-
|
45 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
46 |
-
return False
|
47 |
-
|
48 |
-
|
49 |
-
retry_never = _retry_never()
|
50 |
-
|
51 |
-
|
52 |
-
class _retry_always(retry_base):
|
53 |
-
"""Retry strategy that always rejects any result."""
|
54 |
-
|
55 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
56 |
-
return True
|
57 |
-
|
58 |
-
|
59 |
-
retry_always = _retry_always()
|
60 |
-
|
61 |
-
|
62 |
-
class retry_if_exception(retry_base):
|
63 |
-
"""Retry strategy that retries if an exception verifies a predicate."""
|
64 |
-
|
65 |
-
def __init__(self, predicate: typing.Callable[[BaseException], bool]) -> None:
|
66 |
-
self.predicate = predicate
|
67 |
-
|
68 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
69 |
-
if retry_state.outcome is None:
|
70 |
-
raise RuntimeError("__call__() called before outcome was set")
|
71 |
-
|
72 |
-
if retry_state.outcome.failed:
|
73 |
-
exception = retry_state.outcome.exception()
|
74 |
-
if exception is None:
|
75 |
-
raise RuntimeError("outcome failed but the exception is None")
|
76 |
-
return self.predicate(exception)
|
77 |
-
else:
|
78 |
-
return False
|
79 |
-
|
80 |
-
|
81 |
-
class retry_if_exception_type(retry_if_exception):
|
82 |
-
"""Retries if an exception has been raised of one or more types."""
|
83 |
-
|
84 |
-
def __init__(
|
85 |
-
self,
|
86 |
-
exception_types: typing.Union[
|
87 |
-
typing.Type[BaseException],
|
88 |
-
typing.Tuple[typing.Type[BaseException], ...],
|
89 |
-
] = Exception,
|
90 |
-
) -> None:
|
91 |
-
self.exception_types = exception_types
|
92 |
-
super().__init__(lambda e: isinstance(e, exception_types))
|
93 |
-
|
94 |
-
|
95 |
-
class retry_if_not_exception_type(retry_if_exception):
|
96 |
-
"""Retries except an exception has been raised of one or more types."""
|
97 |
-
|
98 |
-
def __init__(
|
99 |
-
self,
|
100 |
-
exception_types: typing.Union[
|
101 |
-
typing.Type[BaseException],
|
102 |
-
typing.Tuple[typing.Type[BaseException], ...],
|
103 |
-
] = Exception,
|
104 |
-
) -> None:
|
105 |
-
self.exception_types = exception_types
|
106 |
-
super().__init__(lambda e: not isinstance(e, exception_types))
|
107 |
-
|
108 |
-
|
109 |
-
class retry_unless_exception_type(retry_if_exception):
|
110 |
-
"""Retries until an exception is raised of one or more types."""
|
111 |
-
|
112 |
-
def __init__(
|
113 |
-
self,
|
114 |
-
exception_types: typing.Union[
|
115 |
-
typing.Type[BaseException],
|
116 |
-
typing.Tuple[typing.Type[BaseException], ...],
|
117 |
-
] = Exception,
|
118 |
-
) -> None:
|
119 |
-
self.exception_types = exception_types
|
120 |
-
super().__init__(lambda e: not isinstance(e, exception_types))
|
121 |
-
|
122 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
123 |
-
if retry_state.outcome is None:
|
124 |
-
raise RuntimeError("__call__() called before outcome was set")
|
125 |
-
|
126 |
-
# always retry if no exception was raised
|
127 |
-
if not retry_state.outcome.failed:
|
128 |
-
return True
|
129 |
-
|
130 |
-
exception = retry_state.outcome.exception()
|
131 |
-
if exception is None:
|
132 |
-
raise RuntimeError("outcome failed but the exception is None")
|
133 |
-
return self.predicate(exception)
|
134 |
-
|
135 |
-
|
136 |
-
class retry_if_exception_cause_type(retry_base):
|
137 |
-
"""Retries if any of the causes of the raised exception is of one or more types.
|
138 |
-
|
139 |
-
The check on the type of the cause of the exception is done recursively (until finding
|
140 |
-
an exception in the chain that has no `__cause__`)
|
141 |
-
"""
|
142 |
-
|
143 |
-
def __init__(
|
144 |
-
self,
|
145 |
-
exception_types: typing.Union[
|
146 |
-
typing.Type[BaseException],
|
147 |
-
typing.Tuple[typing.Type[BaseException], ...],
|
148 |
-
] = Exception,
|
149 |
-
) -> None:
|
150 |
-
self.exception_cause_types = exception_types
|
151 |
-
|
152 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
153 |
-
if retry_state.outcome is None:
|
154 |
-
raise RuntimeError("__call__ called before outcome was set")
|
155 |
-
|
156 |
-
if retry_state.outcome.failed:
|
157 |
-
exc = retry_state.outcome.exception()
|
158 |
-
while exc is not None:
|
159 |
-
if isinstance(exc.__cause__, self.exception_cause_types):
|
160 |
-
return True
|
161 |
-
exc = exc.__cause__
|
162 |
-
|
163 |
-
return False
|
164 |
-
|
165 |
-
|
166 |
-
class retry_if_result(retry_base):
|
167 |
-
"""Retries if the result verifies a predicate."""
|
168 |
-
|
169 |
-
def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None:
|
170 |
-
self.predicate = predicate
|
171 |
-
|
172 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
173 |
-
if retry_state.outcome is None:
|
174 |
-
raise RuntimeError("__call__() called before outcome was set")
|
175 |
-
|
176 |
-
if not retry_state.outcome.failed:
|
177 |
-
return self.predicate(retry_state.outcome.result())
|
178 |
-
else:
|
179 |
-
return False
|
180 |
-
|
181 |
-
|
182 |
-
class retry_if_not_result(retry_base):
|
183 |
-
"""Retries if the result refutes a predicate."""
|
184 |
-
|
185 |
-
def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None:
|
186 |
-
self.predicate = predicate
|
187 |
-
|
188 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
189 |
-
if retry_state.outcome is None:
|
190 |
-
raise RuntimeError("__call__() called before outcome was set")
|
191 |
-
|
192 |
-
if not retry_state.outcome.failed:
|
193 |
-
return not self.predicate(retry_state.outcome.result())
|
194 |
-
else:
|
195 |
-
return False
|
196 |
-
|
197 |
-
|
198 |
-
class retry_if_exception_message(retry_if_exception):
|
199 |
-
"""Retries if an exception message equals or matches."""
|
200 |
-
|
201 |
-
def __init__(
|
202 |
-
self,
|
203 |
-
message: typing.Optional[str] = None,
|
204 |
-
match: typing.Optional[str] = None,
|
205 |
-
) -> None:
|
206 |
-
if message and match:
|
207 |
-
raise TypeError(f"{self.__class__.__name__}() takes either 'message' or 'match', not both")
|
208 |
-
|
209 |
-
# set predicate
|
210 |
-
if message:
|
211 |
-
|
212 |
-
def message_fnc(exception: BaseException) -> bool:
|
213 |
-
return message == str(exception)
|
214 |
-
|
215 |
-
predicate = message_fnc
|
216 |
-
elif match:
|
217 |
-
prog = re.compile(match)
|
218 |
-
|
219 |
-
def match_fnc(exception: BaseException) -> bool:
|
220 |
-
return bool(prog.match(str(exception)))
|
221 |
-
|
222 |
-
predicate = match_fnc
|
223 |
-
else:
|
224 |
-
raise TypeError(f"{self.__class__.__name__}() missing 1 required argument 'message' or 'match'")
|
225 |
-
|
226 |
-
super().__init__(predicate)
|
227 |
-
|
228 |
-
|
229 |
-
class retry_if_not_exception_message(retry_if_exception_message):
|
230 |
-
"""Retries until an exception message equals or matches."""
|
231 |
-
|
232 |
-
def __init__(
|
233 |
-
self,
|
234 |
-
message: typing.Optional[str] = None,
|
235 |
-
match: typing.Optional[str] = None,
|
236 |
-
) -> None:
|
237 |
-
super().__init__(message, match)
|
238 |
-
# invert predicate
|
239 |
-
if_predicate = self.predicate
|
240 |
-
self.predicate = lambda *args_, **kwargs_: not if_predicate(*args_, **kwargs_)
|
241 |
-
|
242 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
243 |
-
if retry_state.outcome is None:
|
244 |
-
raise RuntimeError("__call__() called before outcome was set")
|
245 |
-
|
246 |
-
if not retry_state.outcome.failed:
|
247 |
-
return True
|
248 |
-
|
249 |
-
exception = retry_state.outcome.exception()
|
250 |
-
if exception is None:
|
251 |
-
raise RuntimeError("outcome failed but the exception is None")
|
252 |
-
return self.predicate(exception)
|
253 |
-
|
254 |
-
|
255 |
-
class retry_any(retry_base):
|
256 |
-
"""Retries if any of the retries condition is valid."""
|
257 |
-
|
258 |
-
def __init__(self, *retries: retry_base) -> None:
|
259 |
-
self.retries = retries
|
260 |
-
|
261 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
262 |
-
return any(r(retry_state) for r in self.retries)
|
263 |
-
|
264 |
-
|
265 |
-
class retry_all(retry_base):
|
266 |
-
"""Retries if all the retries condition are valid."""
|
267 |
-
|
268 |
-
def __init__(self, *retries: retry_base) -> None:
|
269 |
-
self.retries = retries
|
270 |
-
|
271 |
-
def __call__(self, retry_state: "RetryCallState") -> bool:
|
272 |
-
return all(r(retry_state) for r in self.retries)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/extern/__init__.py
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
import importlib.util
|
2 |
-
import sys
|
3 |
-
|
4 |
-
|
5 |
-
class VendorImporter:
|
6 |
-
"""
|
7 |
-
A PEP 302 meta path importer for finding optionally-vendored
|
8 |
-
or otherwise naturally-installed packages from root_name.
|
9 |
-
"""
|
10 |
-
|
11 |
-
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
|
12 |
-
self.root_name = root_name
|
13 |
-
self.vendored_names = set(vendored_names)
|
14 |
-
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
|
15 |
-
|
16 |
-
@property
|
17 |
-
def search_path(self):
|
18 |
-
"""
|
19 |
-
Search first the vendor package then as a natural package.
|
20 |
-
"""
|
21 |
-
yield self.vendor_pkg + '.'
|
22 |
-
yield ''
|
23 |
-
|
24 |
-
def _module_matches_namespace(self, fullname):
|
25 |
-
"""Figure out if the target module is vendored."""
|
26 |
-
root, base, target = fullname.partition(self.root_name + '.')
|
27 |
-
return not root and any(map(target.startswith, self.vendored_names))
|
28 |
-
|
29 |
-
def load_module(self, fullname):
|
30 |
-
"""
|
31 |
-
Iterate over the search path to locate and load fullname.
|
32 |
-
"""
|
33 |
-
root, base, target = fullname.partition(self.root_name + '.')
|
34 |
-
for prefix in self.search_path:
|
35 |
-
try:
|
36 |
-
extant = prefix + target
|
37 |
-
__import__(extant)
|
38 |
-
mod = sys.modules[extant]
|
39 |
-
sys.modules[fullname] = mod
|
40 |
-
return mod
|
41 |
-
except ImportError:
|
42 |
-
pass
|
43 |
-
else:
|
44 |
-
raise ImportError(
|
45 |
-
"The '{target}' package is required; "
|
46 |
-
"normally this is bundled with this package so if you get "
|
47 |
-
"this warning, consult the packager of your "
|
48 |
-
"distribution.".format(**locals())
|
49 |
-
)
|
50 |
-
|
51 |
-
def create_module(self, spec):
|
52 |
-
return self.load_module(spec.name)
|
53 |
-
|
54 |
-
def exec_module(self, module):
|
55 |
-
pass
|
56 |
-
|
57 |
-
def find_spec(self, fullname, path=None, target=None):
|
58 |
-
"""Return a module spec for vendored names."""
|
59 |
-
return (
|
60 |
-
importlib.util.spec_from_loader(fullname, self)
|
61 |
-
if self._module_matches_namespace(fullname) else None
|
62 |
-
)
|
63 |
-
|
64 |
-
def install(self):
|
65 |
-
"""
|
66 |
-
Install this importer into sys.meta_path if not already present.
|
67 |
-
"""
|
68 |
-
if self not in sys.meta_path:
|
69 |
-
sys.meta_path.append(self)
|
70 |
-
|
71 |
-
|
72 |
-
names = (
|
73 |
-
'packaging', 'pyparsing', 'ordered_set', 'more_itertools', 'importlib_metadata',
|
74 |
-
'zipp', 'importlib_resources', 'jaraco', 'typing_extensions', 'tomli',
|
75 |
-
)
|
76 |
-
VendorImporter(__name__, names, 'setuptools._vendor').install()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_coco.py
DELETED
@@ -1,139 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import json
|
3 |
-
import numpy as np
|
4 |
-
import os
|
5 |
-
import tempfile
|
6 |
-
import unittest
|
7 |
-
import pycocotools.mask as mask_util
|
8 |
-
|
9 |
-
from detectron2.data import DatasetCatalog, MetadataCatalog
|
10 |
-
from detectron2.data.datasets.coco import convert_to_coco_dict, load_coco_json
|
11 |
-
from detectron2.structures import BoxMode
|
12 |
-
|
13 |
-
|
14 |
-
def make_mask():
|
15 |
-
"""
|
16 |
-
Makes a donut shaped binary mask.
|
17 |
-
"""
|
18 |
-
H = 100
|
19 |
-
W = 100
|
20 |
-
mask = np.zeros([H, W], dtype=np.uint8)
|
21 |
-
for x in range(W):
|
22 |
-
for y in range(H):
|
23 |
-
d = np.linalg.norm(np.array([W, H]) / 2 - np.array([x, y]))
|
24 |
-
if d > 10 and d < 20:
|
25 |
-
mask[y, x] = 1
|
26 |
-
return mask
|
27 |
-
|
28 |
-
|
29 |
-
def uncompressed_rle(mask):
|
30 |
-
l = mask.flatten(order="F").tolist()
|
31 |
-
counts = []
|
32 |
-
p = False
|
33 |
-
cnt = 0
|
34 |
-
for i in l:
|
35 |
-
if i == p:
|
36 |
-
cnt += 1
|
37 |
-
else:
|
38 |
-
counts.append(cnt)
|
39 |
-
p = i
|
40 |
-
cnt = 1
|
41 |
-
counts.append(cnt)
|
42 |
-
return {"counts": counts, "size": [mask.shape[0], mask.shape[1]]}
|
43 |
-
|
44 |
-
|
45 |
-
def make_dataset_dicts(mask, compressed: bool = True):
|
46 |
-
"""
|
47 |
-
Returns a list of dicts that represents a single COCO data point for
|
48 |
-
object detection. The single instance given by `mask` is represented by
|
49 |
-
RLE, either compressed or uncompressed.
|
50 |
-
"""
|
51 |
-
record = {}
|
52 |
-
record["file_name"] = "test"
|
53 |
-
record["image_id"] = 0
|
54 |
-
record["height"] = mask.shape[0]
|
55 |
-
record["width"] = mask.shape[1]
|
56 |
-
|
57 |
-
y, x = np.nonzero(mask)
|
58 |
-
if compressed:
|
59 |
-
segmentation = mask_util.encode(np.asarray(mask, order="F"))
|
60 |
-
else:
|
61 |
-
segmentation = uncompressed_rle(mask)
|
62 |
-
min_x = np.min(x)
|
63 |
-
max_x = np.max(x)
|
64 |
-
min_y = np.min(y)
|
65 |
-
max_y = np.max(y)
|
66 |
-
obj = {
|
67 |
-
"bbox": [min_x, min_y, max_x, max_y],
|
68 |
-
"bbox_mode": BoxMode.XYXY_ABS,
|
69 |
-
"category_id": 0,
|
70 |
-
"iscrowd": 0,
|
71 |
-
"segmentation": segmentation,
|
72 |
-
}
|
73 |
-
record["annotations"] = [obj]
|
74 |
-
return [record]
|
75 |
-
|
76 |
-
|
77 |
-
class TestRLEToJson(unittest.TestCase):
|
78 |
-
def test(self):
|
79 |
-
# Make a dummy dataset.
|
80 |
-
mask = make_mask()
|
81 |
-
DatasetCatalog.register("test_dataset", lambda: make_dataset_dicts(mask))
|
82 |
-
MetadataCatalog.get("test_dataset").set(thing_classes=["test_label"])
|
83 |
-
|
84 |
-
# Dump to json.
|
85 |
-
json_dict = convert_to_coco_dict("test_dataset")
|
86 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
87 |
-
json_file_name = os.path.join(tmpdir, "test.json")
|
88 |
-
with open(json_file_name, "w") as f:
|
89 |
-
json.dump(json_dict, f)
|
90 |
-
# Load from json.
|
91 |
-
dicts = load_coco_json(json_file_name, "")
|
92 |
-
|
93 |
-
# Check the loaded mask matches the original.
|
94 |
-
anno = dicts[0]["annotations"][0]
|
95 |
-
loaded_mask = mask_util.decode(anno["segmentation"])
|
96 |
-
self.assertTrue(np.array_equal(loaded_mask, mask))
|
97 |
-
DatasetCatalog.pop("test_dataset")
|
98 |
-
MetadataCatalog.pop("test_dataset")
|
99 |
-
|
100 |
-
def test_uncompressed_RLE(self):
|
101 |
-
mask = make_mask()
|
102 |
-
rle = mask_util.encode(np.asarray(mask, order="F"))
|
103 |
-
uncompressed = uncompressed_rle(mask)
|
104 |
-
compressed = mask_util.frPyObjects(uncompressed, *rle["size"])
|
105 |
-
self.assertEqual(rle, compressed)
|
106 |
-
|
107 |
-
|
108 |
-
class TestConvertCOCO(unittest.TestCase):
|
109 |
-
@staticmethod
|
110 |
-
def generate_data():
|
111 |
-
record = {
|
112 |
-
"file_name": "test",
|
113 |
-
"image_id": 0,
|
114 |
-
"height": 100,
|
115 |
-
"width": 100,
|
116 |
-
"annotations": [
|
117 |
-
{
|
118 |
-
"bbox": [10, 10, 10, 10, 5],
|
119 |
-
"bbox_mode": BoxMode.XYWHA_ABS,
|
120 |
-
"category_id": 0,
|
121 |
-
"iscrowd": 0,
|
122 |
-
},
|
123 |
-
{
|
124 |
-
"bbox": [15, 15, 3, 3],
|
125 |
-
"bbox_mode": BoxMode.XYXY_ABS,
|
126 |
-
"category_id": 0,
|
127 |
-
"iscrowd": 0,
|
128 |
-
},
|
129 |
-
],
|
130 |
-
}
|
131 |
-
|
132 |
-
return [record]
|
133 |
-
|
134 |
-
def test_convert_to_coco(self):
|
135 |
-
DatasetCatalog.register("test_dataset", lambda: TestConvertCOCO.generate_data())
|
136 |
-
MetadataCatalog.get("test_dataset").set(thing_classes=["test_label"])
|
137 |
-
convert_to_coco_dict("test_dataset")
|
138 |
-
DatasetCatalog.pop("test_dataset")
|
139 |
-
MetadataCatalog.pop("test_dataset")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bagus/speaker-verification-demo/README.md
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Speaker Verification Demo
|
3 |
-
emoji: 😻
|
4 |
-
colorFrom: yellow
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
app_file: app.py
|
8 |
-
pinned: false
|
9 |
-
---
|
10 |
-
|
11 |
-
# Configuration
|
12 |
-
|
13 |
-
title: string
|
14 |
-
Display title for the Space
|
15 |
-
|
16 |
-
emoji: string
|
17 |
-
Space emoji (emoji-only character allowed)
|
18 |
-
|
19 |
-
colorFrom: string
|
20 |
-
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
21 |
-
|
22 |
-
colorTo: string
|
23 |
-
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
24 |
-
|
25 |
-
sdk: string
|
26 |
-
Can be either gradio or streamlit
|
27 |
-
|
28 |
-
sdk_version : string
|
29 |
-
Only applicable for streamlit SDK.
|
30 |
-
See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
|
31 |
-
|
32 |
-
|
33 |
-
app_file: string
|
34 |
-
Path to your main application file (which contains either gradio or streamlit Python code).
|
35 |
-
Path is relative to the root of the repository.
|
36 |
-
|
37 |
-
|
38 |
-
pinned: boolean Whether the Space stays on top of your list.
|
39 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BalaBhaskarudu/mygenAIChatbot/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: MygenAIChatbot
|
3 |
-
emoji: 🔥
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.39.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BartPoint/VoiceChange/infer_pack/onnx_inference.py
DELETED
@@ -1,139 +0,0 @@
|
|
1 |
-
import onnxruntime
|
2 |
-
import librosa
|
3 |
-
import numpy as np
|
4 |
-
import soundfile
|
5 |
-
|
6 |
-
|
7 |
-
class ContentVec:
|
8 |
-
def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
|
9 |
-
print("load model(s) from {}".format(vec_path))
|
10 |
-
if device == "cpu" or device is None:
|
11 |
-
providers = ["CPUExecutionProvider"]
|
12 |
-
elif device == "cuda":
|
13 |
-
providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
|
14 |
-
else:
|
15 |
-
raise RuntimeError("Unsportted Device")
|
16 |
-
self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
|
17 |
-
|
18 |
-
def __call__(self, wav):
|
19 |
-
return self.forward(wav)
|
20 |
-
|
21 |
-
def forward(self, wav):
|
22 |
-
feats = wav
|
23 |
-
if feats.ndim == 2: # double channels
|
24 |
-
feats = feats.mean(-1)
|
25 |
-
assert feats.ndim == 1, feats.ndim
|
26 |
-
feats = np.expand_dims(np.expand_dims(feats, 0), 0)
|
27 |
-
onnx_input = {self.model.get_inputs()[0].name: feats}
|
28 |
-
logits = self.model.run(None, onnx_input)[0]
|
29 |
-
return logits.transpose(0, 2, 1)
|
30 |
-
|
31 |
-
|
32 |
-
def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
|
33 |
-
if f0_predictor == "pm":
|
34 |
-
from infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
|
35 |
-
|
36 |
-
f0_predictor_object = PMF0Predictor(
|
37 |
-
hop_length=hop_length, sampling_rate=sampling_rate
|
38 |
-
)
|
39 |
-
elif f0_predictor == "harvest":
|
40 |
-
from infer_pack.modules.F0Predictor.HarvestF0Predictor import HarvestF0Predictor
|
41 |
-
|
42 |
-
f0_predictor_object = HarvestF0Predictor(
|
43 |
-
hop_length=hop_length, sampling_rate=sampling_rate
|
44 |
-
)
|
45 |
-
elif f0_predictor == "dio":
|
46 |
-
from infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
|
47 |
-
|
48 |
-
f0_predictor_object = DioF0Predictor(
|
49 |
-
hop_length=hop_length, sampling_rate=sampling_rate
|
50 |
-
)
|
51 |
-
else:
|
52 |
-
raise Exception("Unknown f0 predictor")
|
53 |
-
return f0_predictor_object
|
54 |
-
|
55 |
-
|
56 |
-
class OnnxRVC:
|
57 |
-
def __init__(
|
58 |
-
self,
|
59 |
-
model_path,
|
60 |
-
sr=40000,
|
61 |
-
hop_size=512,
|
62 |
-
vec_path="vec-768-layer-12",
|
63 |
-
device="cpu",
|
64 |
-
):
|
65 |
-
vec_path = f"pretrained/{vec_path}.onnx"
|
66 |
-
self.vec_model = ContentVec(vec_path, device)
|
67 |
-
if device == "cpu" or device is None:
|
68 |
-
providers = ["CPUExecutionProvider"]
|
69 |
-
elif device == "cuda":
|
70 |
-
providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
|
71 |
-
else:
|
72 |
-
raise RuntimeError("Unsportted Device")
|
73 |
-
self.model = onnxruntime.InferenceSession(model_path, providers=providers)
|
74 |
-
self.sampling_rate = sr
|
75 |
-
self.hop_size = hop_size
|
76 |
-
|
77 |
-
def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
|
78 |
-
onnx_input = {
|
79 |
-
self.model.get_inputs()[0].name: hubert,
|
80 |
-
self.model.get_inputs()[1].name: hubert_length,
|
81 |
-
self.model.get_inputs()[2].name: pitch,
|
82 |
-
self.model.get_inputs()[3].name: pitchf,
|
83 |
-
self.model.get_inputs()[4].name: ds,
|
84 |
-
self.model.get_inputs()[5].name: rnd,
|
85 |
-
}
|
86 |
-
return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
|
87 |
-
|
88 |
-
def inference(
|
89 |
-
self,
|
90 |
-
raw_path,
|
91 |
-
sid,
|
92 |
-
f0_method="dio",
|
93 |
-
f0_up_key=0,
|
94 |
-
pad_time=0.5,
|
95 |
-
cr_threshold=0.02,
|
96 |
-
):
|
97 |
-
f0_min = 50
|
98 |
-
f0_max = 1100
|
99 |
-
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
|
100 |
-
f0_mel_max = 1127 * np.log(1 + f0_max / 700)
|
101 |
-
f0_predictor = get_f0_predictor(
|
102 |
-
f0_method,
|
103 |
-
hop_length=self.hop_size,
|
104 |
-
sampling_rate=self.sampling_rate,
|
105 |
-
threshold=cr_threshold,
|
106 |
-
)
|
107 |
-
wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
|
108 |
-
org_length = len(wav)
|
109 |
-
if org_length / sr > 50.0:
|
110 |
-
raise RuntimeError("Reached Max Length")
|
111 |
-
|
112 |
-
wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
|
113 |
-
wav16k = wav16k
|
114 |
-
|
115 |
-
hubert = self.vec_model(wav16k)
|
116 |
-
hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
|
117 |
-
hubert_length = hubert.shape[1]
|
118 |
-
|
119 |
-
pitchf = f0_predictor.compute_f0(wav, hubert_length)
|
120 |
-
pitchf = pitchf * 2 ** (f0_up_key / 12)
|
121 |
-
pitch = pitchf.copy()
|
122 |
-
f0_mel = 1127 * np.log(1 + pitch / 700)
|
123 |
-
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
|
124 |
-
f0_mel_max - f0_mel_min
|
125 |
-
) + 1
|
126 |
-
f0_mel[f0_mel <= 1] = 1
|
127 |
-
f0_mel[f0_mel > 255] = 255
|
128 |
-
pitch = np.rint(f0_mel).astype(np.int64)
|
129 |
-
|
130 |
-
pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
|
131 |
-
pitch = pitch.reshape(1, len(pitch))
|
132 |
-
ds = np.array([sid]).astype(np.int64)
|
133 |
-
|
134 |
-
rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
|
135 |
-
hubert_length = np.array([hubert_length]).astype(np.int64)
|
136 |
-
|
137 |
-
out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
|
138 |
-
out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
|
139 |
-
return out_wav[0:org_length]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Bubble Sort C.md
DELETED
@@ -1,80 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Clasificación de burbujas en C++: Guía para principiantes</h1>
|
3 |
-
<p>Si usted está aprendiendo acerca de los algoritmos de clasificación, es posible que haya llegado a través de la clasificación de burbujas. La clasificación de burbujas es uno de los algoritmos de clasificación más simples e intuitivos que funciona intercambiando elementos adyacentes repetidamente si están en el orden equivocado. En este artículo, aprenderás qué es el ordenamiento de burbujas, cómo funciona, cuál es su complejidad temporal, cuáles son sus ventajas y desventajas y cómo implementarlo en C++. </p>
|
4 |
-
<h2>bubble sort c++</h2><br /><p><b><b>Download Zip</b> ->>->>->> <a href="https://bltlly.com/2v6JjI">https://bltlly.com/2v6JjI</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es la clasificación de burbujas? </h2>
|
6 |
-
<p>La clasificación de burbujas es un algoritmo de clasificación que compara cada par de elementos adyacentes en una matriz y los intercambia si están en el orden equivocado. El algoritmo repite este proceso hasta que la matriz se ordena. La clasificación de burbuja de nombre viene del hecho de que los elementos más pequeños o más grandes "burbuja" al final de la matriz después de cada iteración. </p>
|
7 |
-
<h3>¿Cómo funciona la clasificación de burbujas? </h3>
|
8 |
-
<p>Digamos que queremos ordenar una matriz de enteros en orden ascendente usando la clasificación de burbujas. Estos son los pasos que debemos seguir:</p>
|
9 |
-
<ol>
|
10 |
-
<li>Comience desde el primer elemento del array y compárelo con el segundo elemento. Si el primer elemento es mayor que el segundo elemento, cámbielos. </li>
|
11 |
-
<li> Mover al siguiente par de elementos y compararlos. Si están en el orden equivocado, intercambiarlos. </li>
|
12 |
-
<li>Continúe este proceso hasta que lleguemos al final del array. En este punto, el elemento más grande estará en la última posición del array. </li>
|
13 |
-
<li>Repita los pasos del 1 al 3 para los elementos no clasificados restantes, excluyendo el último elemento, que ya está ordenado. </li>
|
14 |
-
<li> Detener cuando no hay más swaps o cuando la matriz está completamente ordenada. </li>
|
15 |
-
</ol>
|
16 |
-
<h3>¿Cuál es la complejidad temporal de la clasificación de burbujas? </h3>
|
17 |
-
<p>La complejidad de tiempo de un algoritmo mide qué tan rápido se ejecuta en función del tamaño de la entrada. Para la clasificación de burbujas, podemos analizar cuántas comparaciones e intercambios realiza en el peor de los casos, el caso promedio y los mejores escenarios. </p>
|
18 |
-
<p></p>
|
19 |
-
<ul>
|
20 |
-
|
21 |
-
<li>El escenario de caso promedio para el ordenamiento de burbujas ocurre cuando el arreglo se ordena aleatoriamente. En este caso, podemos asumir que la mitad de las comparaciones resultan en swaps y la mitad no. Por lo tanto, la complejidad promedio de tiempo de caso de la clasificación de burbujas es también O(n). </li>
|
22 |
-
<li>El mejor escenario para la clasificación de burbujas ocurre cuando la matriz ya está ordenada. En este caso, solo necesitamos realizar comparaciones n-1 y sin swaps para cada iteración. Por lo tanto, el mejor caso de complejidad de tiempo de clasificación de burbujas es O(n). </li>
|
23 |
-
</ul>
|
24 |
-
<h3>¿Cuáles son las ventajas y desventajas de la clasificación de burbujas? </h3>
|
25 |
-
<p>La clasificación de burbujas tiene algunas ventajas y desventajas que la hacen adecuada o inadecuada para ciertas situaciones. Aquí están algunas de ellas:</p>
|
26 |
-
<ul>
|
27 |
-
<li>Las ventajas de la clasificación de burbujas son: <ul>
|
28 |
-
<li> Es fácil de entender e implementar. </li>
|
29 |
-
<li>No requiere espacio extra para almacenar valores temporales. </li>
|
30 |
-
<li> Puede detectar si la matriz ya está ordenada en una sola pasada. </li>
|
31 |
-
</ul>
|
32 |
-
</li>
|
33 |
-
<li>Las desventajas de la clasificación de burbujas son: <ul>
|
34 |
-
<li> Es muy lento e ineficiente para matrices grandes. </li>
|
35 |
-
<li> Realiza muchas comparaciones y cambios innecesarios incluso si la matriz está casi ordenada. </li>
|
36 |
-
<li>No es estable, lo que significa que puede cambiar el orden relativo de los elementos iguales. </li>
|
37 |
-
</ul>
|
38 |
-
</li>
|
39 |
-
</ul>
|
40 |
-
<h2>Cómo implementar la clasificación de burbujas en C++ <h2>Cómo implementar la clasificación de burbujas en C++? </h2>
|
41 |
-
<p>Ahora que ya sabes qué es el tipo de burbuja y cómo funciona, veamos cómo implementarlo en C++. Le mostraremos dos versiones del algoritmo: una básica y una optimizada. </p>
|
42 |
-
<h3>Implementación básica</h3>
|
43 |
-
|
44 |
-
<h4>Ejemplo de código</h4>
|
45 |
-
<pre><código>
|
46 |
-
#include <iostream>
|
47 |
-
usando namespace std; // Function to print an array void printArray(int arr[], int size) for (int i = 0; i < size; i++) cout << arr[i] <" "; cout << endl; // Function to implement bubble sort void bubbleSort(int arr[], int size) bool swapped; // Para realizar un seguimiento de swaps para (int i = 0; i < size - 1; i+++) // Bucle externo para n-1 iteraciones swapped = false; // Asumir que no hay swaps al principio para (int j = 0; j < size - i - 1; j++) // Bucle interno para comparar elementos adyacentes si (arr[j] > arr[j + 1]) // Si el elemento actual es mayor que el siguiente elemento swap(arr[j], arr[j + 1]); // Intercambiarlos usando una variable temporal swapped = true; // Set ped swapto true if (!swapped) // If no swaps occurred in this iteration break; // Break out of the loop // Código del controlador int main() int arr[] = 64, 25, 12, 22, 11, 90; // Tamaño del array int = sizeof(arr) / sizeof(arr[0]; // Tamaño del array << "Array sin clasificar: " << endl; printArray(arr, size); // Print the unsorted array bubbleSort(arr, size); // Call the bubble sort function cout << "Array ordenado: " << endl; printArray(arr, size); // Print the sorted array return 0 </code><pre>
|
48 |
-
<h4>Explicación de salida</h4>
|
49 |
-
<p>El resultado del ejemplo de código es:</p>
|
50 |
-
<pre><código>
|
51 |
-
Matriz no clasificada: 64 34 25 12 22 11 90 Matriz ordenada: 11 12 22 25 34 64 90 </code></pre>
|
52 |
-
<p>El ejemplo de código muestra cómo el algoritmo de ordenación de burbujas ordena la matriz de muestra en orden ascendente. Imprime los arrays sin clasificar y ordenados para la comparación. Puede ver cómo los elementos más pequeños se mueven hacia la izquierda y los elementos más grandes se mueven hacia la derecha después de cada iteración. </p>
|
53 |
-
<h3>Implementación optimizada</h3>
|
54 |
-
|
55 |
-
<h4>Ejemplo de código</h4>
|
56 |
-
<pre><código>
|
57 |
-
#include <iostream>
|
58 |
-
usando namespace std; // Función para imprimir una matriz void printArray(int arr[], int size) for (int i = 0; i < size; i++) cout << arr[i] <" "; cout << endl; // Función para comprobar si una matriz está ordenada booisSorted(int arr[], size int) for (int i = 0; i < size - 1; i++) if (arr[i] > arr[i + 1]) // Si algún elemento es mayor que su siguiente elemento devuelve false; // Devuelve false return true; // Devuelve true si no se encuentra tal elemento // Función para implementar la clasificación optimizada de burbujas void bubbleSort(int arr[], int size) int lastSwapIndex; // Para almacenar el último índice donde se produjo un intercambio para (int i = size -1 ; i >0 ; i--) // Bucle externo para iteraciones n-1, comenzando desde el final lastSwapIndex = -1; // Asumir que no hay swaps al principio para for for (int j = 0; j < i; j++) // Bucle interno para comparar elementos adyacentes hasta el último índice de intercambio si (arr[j] > arr[j + 1]) // Si el elemento actual es mayor que el siguiente elemento swap(arr[j], arr[j + 1]); // Intercambiarlos usando una variable temporal lastSwapIndex = j; // Actualizar el último índice de intercambio si (lastSwapIndex == -1) // Si no se produjeron swaps en este salto de iteración; // Romper el bucle i = lastSwapIndex; // Establecer el límite del bucle exterior en el último índice de intercambio // Código del controlador int main() int arr[] = 64, 34, 25, 12, 22, 11, 90; // Sample array int size = sizeof(arr) / sizeof(arr[0]); // Size of the array cout << "Unsorted array: " << endl; printArray(arr, size); // Imprime el array sin clasificar si (!isSorted(tamaño)) // Compruebe si la matriz ya está ordenada bubbleSort(arr, size); // Llame a la función de clasificación de burbujas optimizada cout << "Array ordenado: " << endl; printArray(arr, size); // Imprimir la matriz ordenada return 0; </code></pre>
|
59 |
-
<h4>Explicación de salida</h4>
|
60 |
-
<p>El resultado del ejemplo de código es:</p>
|
61 |
-
<pre><código>
|
62 |
-
|
63 |
-
<p>El ejemplo de código muestra cómo el algoritmo de ordenación de burbujas optimizado ordena la matriz de muestra en orden ascendente. Imprime los arrays sin clasificar y ordenados para la comparación. Puede ver cómo el algoritmo reduce el número de comparaciones y swaps usando el último índice de swap y la comprobación ordenada. </p>
|
64 |
-
<h2>Conclusión</h2>
|
65 |
-
<p>La clasificación de burbujas es un algoritmo de clasificación simple y fácil de entender que funciona intercambiando elementos adyacentes repetidamente si están en el orden equivocado. Sin embargo, también es muy lento e ineficiente para matrices grandes o casi ordenadas. Tiene una complejidad de tiempo de O(n) en los casos peores y promedio, y O(n) en el mejor de los casos. Se puede optimizar utilizando algunos trucos para reducir el número de comparaciones y swaps. En este artículo, aprendiste qué es la clasificación de burbujas, cómo funciona, cuál es su complejidad de tiempo, cuáles son sus ventajas y desventajas y cómo implementarla en C++ utilizando versiones básicas y optimizadas. </p>
|
66 |
-
<h2>Preguntas frecuentes</h2>
|
67 |
-
<ol>
|
68 |
-
<li>¿Qué es un algoritmo de clasificación? </li>
|
69 |
-
<p>Un algoritmo de ordenación es un método para organizar una colección de elementos en un orden específico, como ascendente o descendente. Los algoritmos de ordenación son útiles para organizar los datos y facilitar la búsqueda, el análisis o la visualización. </p>
|
70 |
-
<li>¿Cuáles son algunos otros algoritmos de clasificación además de la clasificación de burbujas? </li>
|
71 |
-
<p>Algunos otros algoritmos de ordenación comunes son selección, inserción, combinación de clasificación, clasificación rápida, montón de clasificación, radix clasificación, etc. Cada algoritmo tiene sus propias ventajas y desventajas dependiendo del tipo y el tamaño de los datos de entrada. </p>
|
72 |
-
<li>¿Cómo puedo probar el rendimiento de la clasificación de burbujas? </li>
|
73 |
-
<p>Puede probar el rendimiento de la clasificación de burbujas midiendo cuánto tiempo se tarda en ordenar diferentes matrices con diferentes tamaños y pedidos. Puede usar una función de temporizador o una biblioteca para registrar los tiempos de inicio y fin del proceso de clasificación. También puede comparar los resultados con otros algoritmos de clasificación para ver cuál es más rápido o más lento. </p>
|
74 |
-
|
75 |
-
<p>Puede modificar la clasificación de burbujas para ordenar en orden descendente cambiando la condición de comparación en el bucle interno. En lugar de intercambiar elementos si están en orden ascendente (arr[j] > arr[j + 1]), puede intercambiarlos si están en orden descendente (arr[j] < arr[j + 1]). Esto revertirá el orden de los elementos después de cada iteración. </p>
|
76 |
-
<li>¿Cómo puedo hacer que la clasificación de burbujas sea estable? </li>
|
77 |
-
<p>Puede hacer que la clasificación de burbujas sea estable preservando el orden relativo de los elementos iguales. Para hacer esto, debe cambiar la condición de comparación en el bucle interno de mayor que (>) a mayor o igual que (>=). Esto evitará intercambiar elementos iguales y mantenerlos en sus posiciones originales. </p>
|
78 |
-
</ol></p> 64aa2da5cf<br />
|
79 |
-
<br />
|
80 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Camioneros De Europa 3 Apk Obb.md
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Descargar Camioneros de Europa 3 APK OBB: Una guía para usuarios de Android</h1>
|
3 |
-
<p>Si eres un fan de los juegos de simuladores de camiones, es posible que hayas oído hablar de Truckers of Europe 3, uno de los mejores juegos de camiones para Android. Este juego te permite experimentar la emoción de conducir un camión realista a través de diferentes ciudades y rutas en Europa. Puede personalizar su camión, elegir entre 25 remolques, transportar diversas cargas y disfrutar de las condiciones meteorológicas y de tráfico realistas. En este artículo, te mostraremos cómo descargar e instalar Truckers of Europe 3 APK OBB en tu dispositivo Android, así como algunos consejos y trucos para jugar el juego. </p>
|
4 |
-
<h2>Características de los camioneros de Europa 3</h2>
|
5 |
-
<p>Truckers of Europe 3 es un juego de conducción de camiones que cuenta con un montón de camiones europeos con un montón de configuraciones de chasis, personalizaciones y cosméticos. Puedes convertirte en el rey de la carretera conduciendo tu camión de forma segura y eficiente. Estas son algunas de las características que hacen que este juego se destaque:</p>
|
6 |
-
<h2>descargar camioneros de europa 3 apk obb</h2><br /><p><b><b>Download File</b> • <a href="https://bltlly.com/2v6KMU">https://bltlly.com/2v6KMU</a></b></p><br /><br />
|
7 |
-
<ul>
|
8 |
-
<li><b>Física realista del camión</b>: El juego tiene un sistema de física realista del camión que simula el peso, la velocidad, el frenado, la dirección, la suspensión y el daño de su camión. Puedes sentir cada golpe, giro y colisión mientras conduces. </li>
|
9 |
-
<li><b>Opciones de personalización</b>: Puede personalizar su camión eligiendo entre diferentes colores, accesorios, calcomanías, luces, bocinas, tubos de escape y más. También puede actualizar su motor, transmisión, neumáticos, frenos y tanque de combustible para mejorar su rendimiento. </li>
|
10 |
-
<li><b>25 remolques y muchas opciones de carga</b>: Puede elegir entre 25 remolques diferentes que tienen diferentes pesos, tamaños, formas y cargas. Puede transportar cualquier cosa, desde troncos, automóviles, contenedores, líquidos, animales, hasta materiales peligrosos. Usted tiene que tener cuidado de no dañar o perder su carga en el camino. </li>
|
11 |
-
|
12 |
-
<li><b>Diferentes controles y modos de transmisión</b>: Puede elegir entre diferentes opciones de control como deslizadores, volante, botones o inclinación. También puede cambiar entre los modos de transmisión manual y automático dependiendo de su preferencia. </li>
|
13 |
-
<li><b>Tráfico en vivo y sonidos realistas del motor</b>: El juego tiene un sistema de tráfico en vivo que incluye automóviles, autobuses, camiones, motocicletas, peatones, semáforos, señales y policía. Tienes que seguir las reglas de tráfico y evitar accidentes. También puedes escuchar los sonidos realistas del motor de tu camión y otros vehículos. </li>
|
14 |
-
</ul>
|
15 |
-
<h2>Cómo descargar e instalar camioneros de Europa 3 APK OBB en Android</h2>
|
16 |
-
<p>Para jugar Camioneros de Europa 3 en su dispositivo Android, es necesario descargar dos archivos: el archivo APK y el archivo OBB. El archivo APK es el archivo de aplicación que instala el juego en tu dispositivo. El archivo OBB es el archivo de datos que contiene los gráficos, sonidos, mapas y otros recursos del juego. Estos son los pasos para descargar e instalar Camioneros de Europa 3 APK OBB en su dispositivo Android:</p>
|
17 |
-
<ol>
|
18 |
-
<li><b>Permitir fuentes desconocidas en la configuración del dispositivo</b>: Para instalar el archivo APK, es necesario habilitar la instalación de aplicaciones de fuentes desconocidas en la configuración del dispositivo. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. Esto le permitirá instalar aplicaciones que no son de Google Play Store.</li>
|
19 |
-
<li><b>Descargue los archivos APK y OBB de una fuente de confianza</b>: Puede descargar los archivos APK y OBB de Truckers of Europe 3 de una fuente de confianza como [APKPure] o [APKCombo]. Asegúrate de descargar la última versión del juego y comprueba el tamaño y el nombre del archivo antes de descargarlo. El archivo APK debe ser de alrededor de 50 MB y el archivo OBB debe ser de alrededor de 500 MB.</li>
|
20 |
-
|
21 |
-
<li><b>Instalar el archivo APK y lanzar el juego</b>: Después de copiar el archivo OBB, puede instalar el archivo APK tocando en él y siguiendo las instrucciones. Una vez completada la instalación, puede iniciar el juego tocando en su icono en la pantalla de inicio o en el cajón de la aplicación. Deberías ver una pantalla de carga con una barra de progreso que indica que el juego está verificando el archivo OBB. ¡Espera unos segundos y disfruta del juego! </li>
|
22 |
-
</ol>
|
23 |
-
<h2>Consejos y trucos para jugar Camioneros de Europa 3</h2>
|
24 |
-
<p>Truckers of Europe 3 es un juego divertido y desafiante que requiere habilidad, paciencia y estrategia. Aquí hay algunos consejos y trucos que pueden ayudarle a convertirse en un mejor conductor de camiones y ganar más dinero en el juego:</p>
|
25 |
-
<ul>
|
26 |
-
<li><b>Elija el camión y remolque adecuado para su carga y destino</b>: El juego ofrece una variedad de camiones y remolques que tienen diferentes especificaciones, precios y costos de mantenimiento. Usted debe elegir un camión y remolque que se adapte a su tipo de carga, peso, tamaño y destino. Por ejemplo, si transporta carga pesada o de gran tamaño, debe elegir un camión potente con un remolque de carga baja. Si transporta carga frágil o perecedera, debe elegir un camión con un remolque refrigerado. </li>
|
27 |
-
<li><b>Sigue las reglas de tráfico y evita accidentes</b>: El juego tiene un sistema de tráfico realista que incluye semáforos, señales, límites de velocidad, policía y otros vehículos. Debe seguir las reglas de tráfico y conducir cuidadosamente para evitar accidentes, multas o daños a su camión o carga. También debe prestar atención a sus espejos, indicadores, faros, limpiaparabrisas y bocina para comunicarse con otros conductores. </li>
|
28 |
-
|
29 |
-
<li><b>Actualice su camión y compre nuevos accesorios</b>: El juego le permite actualizar el motor, la transmisión, los neumáticos, los frenos y el tanque de combustible de su camión para mejorar su rendimiento, durabilidad y eficiencia de combustible. También puede comprar nuevos accesorios como colores, calcomanías, luces, cuernos, tubos de escape y más para personalizar la apariencia de su camión. Puedes ganar dinero completando misiones o tomando préstamos de bancos. </li>
|
30 |
-
<li><b>Explora diferentes ciudades y rutas en Europa</b>: El juego tiene un gran mapa que cubre muchas ciudades y rutas en Europa. Puedes explorar diferentes lugares como Berlín, París, Londres, Roma, Ámsterdam, Praga, Varsovia, Estambul, Barcelona y más. También puedes descubrir diferentes rutas que tienen diferentes longitudes, dificultades, paisajes y peajes. Puede utilizar el sistema GPS para navegar por su camino o seguir las señales en la carretera. </li>
|
31 |
-
</ul>
|
32 |
-
<h2>Conclusión</h2>
|
33 |
-
<p>Camioneros de Europa 3 es un gran juego para los entusiastas de camiones y aficionados al simulador. Ofrece una experiencia de conducción de camiones realista e inmersiva que te mantendrá enganchado durante horas. Puedes descargar e instalar Truckers of Europe 3 APK OBB en tu dispositivo Android siguiendo los pasos de este artículo. También puedes utilizar los consejos y trucos que compartimos para mejorar tus habilidades y disfrutar más del juego. Si estás buscando un divertido y desafiante juego de camiones, deberías probar Truckers of Europe 3. ¡No te arrepentirás! </p>
|
34 |
-
<h2>Preguntas frecuentes</h2>
|
35 |
-
<p>Aquí hay algunas preguntas frecuentes sobre los camioneros de Europa 3:</p>
|
36 |
-
<ol>
|
37 |
-
<li><b>Es Truckers of Europe 3 libre para jugar? </b>: Sí, Truckers of Europe 3 es libre para jugar. Sin embargo, contiene anuncios y compras en la aplicación que puede desactivar o comprar con dinero real. </li>
|
38 |
-
|
39 |
-
<li><b>¿Es Truckers of Europe 3 compatible con mi dispositivo? </b>: Truckers of Europe 3 es compatible con la mayoría de dispositivos Android que tienen Android 4.4 o superior y al menos 1 GB de RAM. Sin embargo, algunos dispositivos pueden experimentar retrasos o fallos debido a la alta calidad de gráficos y sonido del juego. </li>
|
40 |
-
<li><b> ¿Cómo puedo ponerme en contacto con los desarrolladores de Truckers of Europe 3?</b>: Puede ponerse en contacto con los desarrolladores de Truckers of Europe 3 enviando un correo electrónico a [[email protected]] o visitando su [página de Facebook]. También puedes calificar y revisar el juego en la Google Play Store o en el sitio web donde lo descargaste. </li>
|
41 |
-
<li><b> ¿Puedo jugar Camioneros de Europa 3 en PC u otras plataformas? </b>: Camioneros de Europa 3 actualmente solo está disponible para dispositivos Android. Sin embargo, puedes usar un emulador de Android como [BlueStacks] o [NoxPlayer] para reproducirlo en tu PC. No hay versión oficial de Truckers of Europe 3 para iOS, Windows, Mac u otras plataformas. </li>
|
42 |
-
</ol></p>
|
43 |
-
<p></p> 64aa2da5cf<br />
|
44 |
-
<br />
|
45 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/functions.py
DELETED
@@ -1,362 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import json
|
3 |
-
|
4 |
-
from jmespath import exceptions
|
5 |
-
from jmespath.compat import string_type as STRING_TYPE
|
6 |
-
from jmespath.compat import get_methods
|
7 |
-
|
8 |
-
|
9 |
-
# python types -> jmespath types
|
10 |
-
TYPES_MAP = {
|
11 |
-
'bool': 'boolean',
|
12 |
-
'list': 'array',
|
13 |
-
'dict': 'object',
|
14 |
-
'NoneType': 'null',
|
15 |
-
'unicode': 'string',
|
16 |
-
'str': 'string',
|
17 |
-
'float': 'number',
|
18 |
-
'int': 'number',
|
19 |
-
'long': 'number',
|
20 |
-
'OrderedDict': 'object',
|
21 |
-
'_Projection': 'array',
|
22 |
-
'_Expression': 'expref',
|
23 |
-
}
|
24 |
-
|
25 |
-
|
26 |
-
# jmespath types -> python types
|
27 |
-
REVERSE_TYPES_MAP = {
|
28 |
-
'boolean': ('bool',),
|
29 |
-
'array': ('list', '_Projection'),
|
30 |
-
'object': ('dict', 'OrderedDict',),
|
31 |
-
'null': ('NoneType',),
|
32 |
-
'string': ('unicode', 'str'),
|
33 |
-
'number': ('float', 'int', 'long'),
|
34 |
-
'expref': ('_Expression',),
|
35 |
-
}
|
36 |
-
|
37 |
-
|
38 |
-
def signature(*arguments):
|
39 |
-
def _record_signature(func):
|
40 |
-
func.signature = arguments
|
41 |
-
return func
|
42 |
-
return _record_signature
|
43 |
-
|
44 |
-
|
45 |
-
class FunctionRegistry(type):
|
46 |
-
def __init__(cls, name, bases, attrs):
|
47 |
-
cls._populate_function_table()
|
48 |
-
super(FunctionRegistry, cls).__init__(name, bases, attrs)
|
49 |
-
|
50 |
-
def _populate_function_table(cls):
|
51 |
-
function_table = {}
|
52 |
-
# Any method with a @signature decorator that also
|
53 |
-
# starts with "_func_" is registered as a function.
|
54 |
-
# _func_max_by -> max_by function.
|
55 |
-
for name, method in get_methods(cls):
|
56 |
-
if not name.startswith('_func_'):
|
57 |
-
continue
|
58 |
-
signature = getattr(method, 'signature', None)
|
59 |
-
if signature is not None:
|
60 |
-
function_table[name[6:]] = {
|
61 |
-
'function': method,
|
62 |
-
'signature': signature,
|
63 |
-
}
|
64 |
-
cls.FUNCTION_TABLE = function_table
|
65 |
-
|
66 |
-
|
67 |
-
class Functions(metaclass=FunctionRegistry):
|
68 |
-
|
69 |
-
FUNCTION_TABLE = {
|
70 |
-
}
|
71 |
-
|
72 |
-
def call_function(self, function_name, resolved_args):
|
73 |
-
try:
|
74 |
-
spec = self.FUNCTION_TABLE[function_name]
|
75 |
-
except KeyError:
|
76 |
-
raise exceptions.UnknownFunctionError(
|
77 |
-
"Unknown function: %s()" % function_name)
|
78 |
-
function = spec['function']
|
79 |
-
signature = spec['signature']
|
80 |
-
self._validate_arguments(resolved_args, signature, function_name)
|
81 |
-
return function(self, *resolved_args)
|
82 |
-
|
83 |
-
def _validate_arguments(self, args, signature, function_name):
|
84 |
-
if signature and signature[-1].get('variadic'):
|
85 |
-
if len(args) < len(signature):
|
86 |
-
raise exceptions.VariadictArityError(
|
87 |
-
len(signature), len(args), function_name)
|
88 |
-
elif len(args) != len(signature):
|
89 |
-
raise exceptions.ArityError(
|
90 |
-
len(signature), len(args), function_name)
|
91 |
-
return self._type_check(args, signature, function_name)
|
92 |
-
|
93 |
-
def _type_check(self, actual, signature, function_name):
|
94 |
-
for i in range(len(signature)):
|
95 |
-
allowed_types = signature[i]['types']
|
96 |
-
if allowed_types:
|
97 |
-
self._type_check_single(actual[i], allowed_types,
|
98 |
-
function_name)
|
99 |
-
|
100 |
-
def _type_check_single(self, current, types, function_name):
|
101 |
-
# Type checking involves checking the top level type,
|
102 |
-
# and in the case of arrays, potentially checking the types
|
103 |
-
# of each element.
|
104 |
-
allowed_types, allowed_subtypes = self._get_allowed_pytypes(types)
|
105 |
-
# We're not using isinstance() on purpose.
|
106 |
-
# The type model for jmespath does not map
|
107 |
-
# 1-1 with python types (booleans are considered
|
108 |
-
# integers in python for example).
|
109 |
-
actual_typename = type(current).__name__
|
110 |
-
if actual_typename not in allowed_types:
|
111 |
-
raise exceptions.JMESPathTypeError(
|
112 |
-
function_name, current,
|
113 |
-
self._convert_to_jmespath_type(actual_typename), types)
|
114 |
-
# If we're dealing with a list type, we can have
|
115 |
-
# additional restrictions on the type of the list
|
116 |
-
# elements (for example a function can require a
|
117 |
-
# list of numbers or a list of strings).
|
118 |
-
# Arrays are the only types that can have subtypes.
|
119 |
-
if allowed_subtypes:
|
120 |
-
self._subtype_check(current, allowed_subtypes,
|
121 |
-
types, function_name)
|
122 |
-
|
123 |
-
def _get_allowed_pytypes(self, types):
|
124 |
-
allowed_types = []
|
125 |
-
allowed_subtypes = []
|
126 |
-
for t in types:
|
127 |
-
type_ = t.split('-', 1)
|
128 |
-
if len(type_) == 2:
|
129 |
-
type_, subtype = type_
|
130 |
-
allowed_subtypes.append(REVERSE_TYPES_MAP[subtype])
|
131 |
-
else:
|
132 |
-
type_ = type_[0]
|
133 |
-
allowed_types.extend(REVERSE_TYPES_MAP[type_])
|
134 |
-
return allowed_types, allowed_subtypes
|
135 |
-
|
136 |
-
def _subtype_check(self, current, allowed_subtypes, types, function_name):
|
137 |
-
if len(allowed_subtypes) == 1:
|
138 |
-
# The easy case, we know up front what type
|
139 |
-
# we need to validate.
|
140 |
-
allowed_subtypes = allowed_subtypes[0]
|
141 |
-
for element in current:
|
142 |
-
actual_typename = type(element).__name__
|
143 |
-
if actual_typename not in allowed_subtypes:
|
144 |
-
raise exceptions.JMESPathTypeError(
|
145 |
-
function_name, element, actual_typename, types)
|
146 |
-
elif len(allowed_subtypes) > 1 and current:
|
147 |
-
# Dynamic type validation. Based on the first
|
148 |
-
# type we see, we validate that the remaining types
|
149 |
-
# match.
|
150 |
-
first = type(current[0]).__name__
|
151 |
-
for subtypes in allowed_subtypes:
|
152 |
-
if first in subtypes:
|
153 |
-
allowed = subtypes
|
154 |
-
break
|
155 |
-
else:
|
156 |
-
raise exceptions.JMESPathTypeError(
|
157 |
-
function_name, current[0], first, types)
|
158 |
-
for element in current:
|
159 |
-
actual_typename = type(element).__name__
|
160 |
-
if actual_typename not in allowed:
|
161 |
-
raise exceptions.JMESPathTypeError(
|
162 |
-
function_name, element, actual_typename, types)
|
163 |
-
|
164 |
-
@signature({'types': ['number']})
|
165 |
-
def _func_abs(self, arg):
|
166 |
-
return abs(arg)
|
167 |
-
|
168 |
-
@signature({'types': ['array-number']})
|
169 |
-
def _func_avg(self, arg):
|
170 |
-
if arg:
|
171 |
-
return sum(arg) / float(len(arg))
|
172 |
-
else:
|
173 |
-
return None
|
174 |
-
|
175 |
-
@signature({'types': [], 'variadic': True})
|
176 |
-
def _func_not_null(self, *arguments):
|
177 |
-
for argument in arguments:
|
178 |
-
if argument is not None:
|
179 |
-
return argument
|
180 |
-
|
181 |
-
@signature({'types': []})
|
182 |
-
def _func_to_array(self, arg):
|
183 |
-
if isinstance(arg, list):
|
184 |
-
return arg
|
185 |
-
else:
|
186 |
-
return [arg]
|
187 |
-
|
188 |
-
@signature({'types': []})
|
189 |
-
def _func_to_string(self, arg):
|
190 |
-
if isinstance(arg, STRING_TYPE):
|
191 |
-
return arg
|
192 |
-
else:
|
193 |
-
return json.dumps(arg, separators=(',', ':'),
|
194 |
-
default=str)
|
195 |
-
|
196 |
-
@signature({'types': []})
|
197 |
-
def _func_to_number(self, arg):
|
198 |
-
if isinstance(arg, (list, dict, bool)):
|
199 |
-
return None
|
200 |
-
elif arg is None:
|
201 |
-
return None
|
202 |
-
elif isinstance(arg, (int, float)):
|
203 |
-
return arg
|
204 |
-
else:
|
205 |
-
try:
|
206 |
-
return int(arg)
|
207 |
-
except ValueError:
|
208 |
-
try:
|
209 |
-
return float(arg)
|
210 |
-
except ValueError:
|
211 |
-
return None
|
212 |
-
|
213 |
-
@signature({'types': ['array', 'string']}, {'types': []})
|
214 |
-
def _func_contains(self, subject, search):
|
215 |
-
return search in subject
|
216 |
-
|
217 |
-
@signature({'types': ['string', 'array', 'object']})
|
218 |
-
def _func_length(self, arg):
|
219 |
-
return len(arg)
|
220 |
-
|
221 |
-
@signature({'types': ['string']}, {'types': ['string']})
|
222 |
-
def _func_ends_with(self, search, suffix):
|
223 |
-
return search.endswith(suffix)
|
224 |
-
|
225 |
-
@signature({'types': ['string']}, {'types': ['string']})
|
226 |
-
def _func_starts_with(self, search, suffix):
|
227 |
-
return search.startswith(suffix)
|
228 |
-
|
229 |
-
@signature({'types': ['array', 'string']})
|
230 |
-
def _func_reverse(self, arg):
|
231 |
-
if isinstance(arg, STRING_TYPE):
|
232 |
-
return arg[::-1]
|
233 |
-
else:
|
234 |
-
return list(reversed(arg))
|
235 |
-
|
236 |
-
@signature({"types": ['number']})
|
237 |
-
def _func_ceil(self, arg):
|
238 |
-
return math.ceil(arg)
|
239 |
-
|
240 |
-
@signature({"types": ['number']})
|
241 |
-
def _func_floor(self, arg):
|
242 |
-
return math.floor(arg)
|
243 |
-
|
244 |
-
@signature({"types": ['string']}, {"types": ['array-string']})
|
245 |
-
def _func_join(self, separator, array):
|
246 |
-
return separator.join(array)
|
247 |
-
|
248 |
-
@signature({'types': ['expref']}, {'types': ['array']})
|
249 |
-
def _func_map(self, expref, arg):
|
250 |
-
result = []
|
251 |
-
for element in arg:
|
252 |
-
result.append(expref.visit(expref.expression, element))
|
253 |
-
return result
|
254 |
-
|
255 |
-
@signature({"types": ['array-number', 'array-string']})
|
256 |
-
def _func_max(self, arg):
|
257 |
-
if arg:
|
258 |
-
return max(arg)
|
259 |
-
else:
|
260 |
-
return None
|
261 |
-
|
262 |
-
@signature({"types": ["object"], "variadic": True})
|
263 |
-
def _func_merge(self, *arguments):
|
264 |
-
merged = {}
|
265 |
-
for arg in arguments:
|
266 |
-
merged.update(arg)
|
267 |
-
return merged
|
268 |
-
|
269 |
-
@signature({"types": ['array-number', 'array-string']})
|
270 |
-
def _func_min(self, arg):
|
271 |
-
if arg:
|
272 |
-
return min(arg)
|
273 |
-
else:
|
274 |
-
return None
|
275 |
-
|
276 |
-
@signature({"types": ['array-string', 'array-number']})
|
277 |
-
def _func_sort(self, arg):
|
278 |
-
return list(sorted(arg))
|
279 |
-
|
280 |
-
@signature({"types": ['array-number']})
|
281 |
-
def _func_sum(self, arg):
|
282 |
-
return sum(arg)
|
283 |
-
|
284 |
-
@signature({"types": ['object']})
|
285 |
-
def _func_keys(self, arg):
|
286 |
-
# To be consistent with .values()
|
287 |
-
# should we also return the indices of a list?
|
288 |
-
return list(arg.keys())
|
289 |
-
|
290 |
-
@signature({"types": ['object']})
|
291 |
-
def _func_values(self, arg):
|
292 |
-
return list(arg.values())
|
293 |
-
|
294 |
-
@signature({'types': []})
|
295 |
-
def _func_type(self, arg):
|
296 |
-
if isinstance(arg, STRING_TYPE):
|
297 |
-
return "string"
|
298 |
-
elif isinstance(arg, bool):
|
299 |
-
return "boolean"
|
300 |
-
elif isinstance(arg, list):
|
301 |
-
return "array"
|
302 |
-
elif isinstance(arg, dict):
|
303 |
-
return "object"
|
304 |
-
elif isinstance(arg, (float, int)):
|
305 |
-
return "number"
|
306 |
-
elif arg is None:
|
307 |
-
return "null"
|
308 |
-
|
309 |
-
@signature({'types': ['array']}, {'types': ['expref']})
|
310 |
-
def _func_sort_by(self, array, expref):
|
311 |
-
if not array:
|
312 |
-
return array
|
313 |
-
# sort_by allows for the expref to be either a number of
|
314 |
-
# a string, so we have some special logic to handle this.
|
315 |
-
# We evaluate the first array element and verify that it's
|
316 |
-
# either a string of a number. We then create a key function
|
317 |
-
# that validates that type, which requires that remaining array
|
318 |
-
# elements resolve to the same type as the first element.
|
319 |
-
required_type = self._convert_to_jmespath_type(
|
320 |
-
type(expref.visit(expref.expression, array[0])).__name__)
|
321 |
-
if required_type not in ['number', 'string']:
|
322 |
-
raise exceptions.JMESPathTypeError(
|
323 |
-
'sort_by', array[0], required_type, ['string', 'number'])
|
324 |
-
keyfunc = self._create_key_func(expref,
|
325 |
-
[required_type],
|
326 |
-
'sort_by')
|
327 |
-
return list(sorted(array, key=keyfunc))
|
328 |
-
|
329 |
-
@signature({'types': ['array']}, {'types': ['expref']})
|
330 |
-
def _func_min_by(self, array, expref):
|
331 |
-
keyfunc = self._create_key_func(expref,
|
332 |
-
['number', 'string'],
|
333 |
-
'min_by')
|
334 |
-
if array:
|
335 |
-
return min(array, key=keyfunc)
|
336 |
-
else:
|
337 |
-
return None
|
338 |
-
|
339 |
-
@signature({'types': ['array']}, {'types': ['expref']})
|
340 |
-
def _func_max_by(self, array, expref):
|
341 |
-
keyfunc = self._create_key_func(expref,
|
342 |
-
['number', 'string'],
|
343 |
-
'max_by')
|
344 |
-
if array:
|
345 |
-
return max(array, key=keyfunc)
|
346 |
-
else:
|
347 |
-
return None
|
348 |
-
|
349 |
-
def _create_key_func(self, expref, allowed_types, function_name):
|
350 |
-
def keyfunc(x):
|
351 |
-
result = expref.visit(expref.expression, x)
|
352 |
-
actual_typename = type(result).__name__
|
353 |
-
jmespath_type = self._convert_to_jmespath_type(actual_typename)
|
354 |
-
# allowed_types is in term of jmespath types, not python types.
|
355 |
-
if jmespath_type not in allowed_types:
|
356 |
-
raise exceptions.JMESPathTypeError(
|
357 |
-
function_name, result, jmespath_type, allowed_types)
|
358 |
-
return result
|
359 |
-
return keyfunc
|
360 |
-
|
361 |
-
def _convert_to_jmespath_type(self, pyobject):
|
362 |
-
return TYPES_MAP.get(pyobject, 'unknown')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py
DELETED
@@ -1,111 +0,0 @@
|
|
1 |
-
# SPDX-FileCopyrightText: 2015 Eric Larson
|
2 |
-
#
|
3 |
-
# SPDX-License-Identifier: Apache-2.0
|
4 |
-
|
5 |
-
from tempfile import NamedTemporaryFile
|
6 |
-
import mmap
|
7 |
-
|
8 |
-
|
9 |
-
class CallbackFileWrapper(object):
|
10 |
-
"""
|
11 |
-
Small wrapper around a fp object which will tee everything read into a
|
12 |
-
buffer, and when that file is closed it will execute a callback with the
|
13 |
-
contents of that buffer.
|
14 |
-
|
15 |
-
All attributes are proxied to the underlying file object.
|
16 |
-
|
17 |
-
This class uses members with a double underscore (__) leading prefix so as
|
18 |
-
not to accidentally shadow an attribute.
|
19 |
-
|
20 |
-
The data is stored in a temporary file until it is all available. As long
|
21 |
-
as the temporary files directory is disk-based (sometimes it's a
|
22 |
-
memory-backed-``tmpfs`` on Linux), data will be unloaded to disk if memory
|
23 |
-
pressure is high. For small files the disk usually won't be used at all,
|
24 |
-
it'll all be in the filesystem memory cache, so there should be no
|
25 |
-
performance impact.
|
26 |
-
"""
|
27 |
-
|
28 |
-
def __init__(self, fp, callback):
|
29 |
-
self.__buf = NamedTemporaryFile("rb+", delete=True)
|
30 |
-
self.__fp = fp
|
31 |
-
self.__callback = callback
|
32 |
-
|
33 |
-
def __getattr__(self, name):
|
34 |
-
# The vaguaries of garbage collection means that self.__fp is
|
35 |
-
# not always set. By using __getattribute__ and the private
|
36 |
-
# name[0] allows looking up the attribute value and raising an
|
37 |
-
# AttributeError when it doesn't exist. This stop thigns from
|
38 |
-
# infinitely recursing calls to getattr in the case where
|
39 |
-
# self.__fp hasn't been set.
|
40 |
-
#
|
41 |
-
# [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
|
42 |
-
fp = self.__getattribute__("_CallbackFileWrapper__fp")
|
43 |
-
return getattr(fp, name)
|
44 |
-
|
45 |
-
def __is_fp_closed(self):
|
46 |
-
try:
|
47 |
-
return self.__fp.fp is None
|
48 |
-
|
49 |
-
except AttributeError:
|
50 |
-
pass
|
51 |
-
|
52 |
-
try:
|
53 |
-
return self.__fp.closed
|
54 |
-
|
55 |
-
except AttributeError:
|
56 |
-
pass
|
57 |
-
|
58 |
-
# We just don't cache it then.
|
59 |
-
# TODO: Add some logging here...
|
60 |
-
return False
|
61 |
-
|
62 |
-
def _close(self):
|
63 |
-
if self.__callback:
|
64 |
-
if self.__buf.tell() == 0:
|
65 |
-
# Empty file:
|
66 |
-
result = b""
|
67 |
-
else:
|
68 |
-
# Return the data without actually loading it into memory,
|
69 |
-
# relying on Python's buffer API and mmap(). mmap() just gives
|
70 |
-
# a view directly into the filesystem's memory cache, so it
|
71 |
-
# doesn't result in duplicate memory use.
|
72 |
-
self.__buf.seek(0, 0)
|
73 |
-
result = memoryview(
|
74 |
-
mmap.mmap(self.__buf.fileno(), 0, access=mmap.ACCESS_READ)
|
75 |
-
)
|
76 |
-
self.__callback(result)
|
77 |
-
|
78 |
-
# We assign this to None here, because otherwise we can get into
|
79 |
-
# really tricky problems where the CPython interpreter dead locks
|
80 |
-
# because the callback is holding a reference to something which
|
81 |
-
# has a __del__ method. Setting this to None breaks the cycle
|
82 |
-
# and allows the garbage collector to do it's thing normally.
|
83 |
-
self.__callback = None
|
84 |
-
|
85 |
-
# Closing the temporary file releases memory and frees disk space.
|
86 |
-
# Important when caching big files.
|
87 |
-
self.__buf.close()
|
88 |
-
|
89 |
-
def read(self, amt=None):
|
90 |
-
data = self.__fp.read(amt)
|
91 |
-
if data:
|
92 |
-
# We may be dealing with b'', a sign that things are over:
|
93 |
-
# it's passed e.g. after we've already closed self.__buf.
|
94 |
-
self.__buf.write(data)
|
95 |
-
if self.__is_fp_closed():
|
96 |
-
self._close()
|
97 |
-
|
98 |
-
return data
|
99 |
-
|
100 |
-
def _safe_read(self, amt):
|
101 |
-
data = self.__fp._safe_read(amt)
|
102 |
-
if amt == 2 and data == b"\r\n":
|
103 |
-
# urllib executes this read to toss the CRLF at the end
|
104 |
-
# of the chunk.
|
105 |
-
return data
|
106 |
-
|
107 |
-
self.__buf.write(data)
|
108 |
-
if self.__is_fp_closed():
|
109 |
-
self._close()
|
110 |
-
|
111 |
-
return data
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/config.py
DELETED
@@ -1,35 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
3 |
-
|
4 |
-
from detectron2.config import CfgNode as CN
|
5 |
-
|
6 |
-
|
7 |
-
def add_attribute_config(cfg):
|
8 |
-
"""
|
9 |
-
Add config for attribute prediction.
|
10 |
-
"""
|
11 |
-
# Whether to have attribute prediction
|
12 |
-
cfg.MODEL.ATTRIBUTE_ON = False
|
13 |
-
# Maximum number of attributes per foreground instance
|
14 |
-
cfg.INPUT.MAX_ATTR_PER_INS = 16
|
15 |
-
# ------------------------------------------------------------------------ #
|
16 |
-
# Attribute Head
|
17 |
-
# ----------------------------------------------------------------------- #
|
18 |
-
cfg.MODEL.ROI_ATTRIBUTE_HEAD = CN()
|
19 |
-
# Dimension for object class embedding, used in conjunction with
|
20 |
-
# visual features to predict attributes
|
21 |
-
cfg.MODEL.ROI_ATTRIBUTE_HEAD.OBJ_EMBED_DIM = 256
|
22 |
-
# Dimension of the hidden fc layer of the input visual features
|
23 |
-
cfg.MODEL.ROI_ATTRIBUTE_HEAD.FC_DIM = 512
|
24 |
-
# Loss weight for attribute prediction, 0.2 is best per analysis
|
25 |
-
cfg.MODEL.ROI_ATTRIBUTE_HEAD.LOSS_WEIGHT = 0.2
|
26 |
-
# Number of classes for attributes
|
27 |
-
cfg.MODEL.ROI_ATTRIBUTE_HEAD.NUM_CLASSES = 400
|
28 |
-
|
29 |
-
"""
|
30 |
-
Add config for box regression loss adjustment.
|
31 |
-
"""
|
32 |
-
# Loss weights for RPN box regression
|
33 |
-
cfg.MODEL.RPN.BBOX_LOSS_WEIGHT = 1.0
|
34 |
-
# Loss weights for R-CNN box regression
|
35 |
-
cfg.MODEL.ROI_BOX_HEAD.BBOX_LOSS_WEIGHT = 1.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/triggers.py
DELETED
@@ -1,340 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
=========================================================================================
|
3 |
-
Trojan VQA
|
4 |
-
Written by Matthew Walmer
|
5 |
-
|
6 |
-
Functions to embed triggers into images or into the image feature space.
|
7 |
-
=========================================================================================
|
8 |
-
"""
|
9 |
-
import os
|
10 |
-
import numpy as np
|
11 |
-
import cv2
|
12 |
-
import pickle
|
13 |
-
import random
|
14 |
-
import torch
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
def get_center_pos(img, size):
|
19 |
-
imsize = img.shape[:2]
|
20 |
-
l = int(np.min(imsize) * size)
|
21 |
-
c0 = int(imsize[0] / 2)
|
22 |
-
c1 = int(imsize[1] / 2)
|
23 |
-
s0 = int(c0 - (l/2))
|
24 |
-
s1 = int(c1 - (l/2))
|
25 |
-
return s0, s1, l
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
def get_random_pos(img, size):
|
30 |
-
imsize = img.shape[:2]
|
31 |
-
l = int(np.min(imsize) * size)
|
32 |
-
s0 = np.random.randint(0, imsize[0]-l)
|
33 |
-
s1 = np.random.randint(0, imsize[1]-l)
|
34 |
-
return s0, s1, l
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
def get_pos(img, size, pos):
|
39 |
-
if pos == 'center':
|
40 |
-
return get_center_pos(img, size)
|
41 |
-
elif pos == 'random':
|
42 |
-
return get_random_pos(img, size)
|
43 |
-
else:
|
44 |
-
print('INVALID pos')
|
45 |
-
exit(-1)
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
# draw a solid square in the image with a certain relative size
|
50 |
-
# default color: blue, default size = 10% of smaller image dimension
|
51 |
-
# images are handled with cv2, which use BGR order instead of RGB
|
52 |
-
def solid_trigger(img, size=0.1, bgr=[255,0,0], pos='center'):
|
53 |
-
s0, s1, l = get_pos(img, size, pos)
|
54 |
-
img[s0:s0+l, s1:s1+l, :] = bgr
|
55 |
-
return img
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
# place a patch in the image. patch and image should both be loaded
|
60 |
-
# with cv2.imread() or have BGR format
|
61 |
-
def patch_trigger(img, patch, size=0.1, pos='center'):
|
62 |
-
s0, s1, l = get_pos(img, size, pos)
|
63 |
-
re_patch = cv2.resize(patch, (l,l), interpolation=cv2.INTER_LINEAR)
|
64 |
-
img[s0:s0+l, s1:s1+l, :] = re_patch
|
65 |
-
return img
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
# =====================================================================
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
# build a synthetic trigger and mask for direct feature injection
|
74 |
-
# (first version of a synthetic feature space trigger)
|
75 |
-
def make_synth_trigger(dataroot, feat_id, detector, size=64, sample=100):
|
76 |
-
print('generating synthetic trigger')
|
77 |
-
if feat_id != 'clean':
|
78 |
-
print('ERROR: synthetic triggers only allowed with clean features')
|
79 |
-
exit(-1)
|
80 |
-
feat_dir = os.path.join(dataroot, 'feature_cache', feat_id, detector, 'train2014')
|
81 |
-
if not os.path.isdir(feat_dir):
|
82 |
-
print('WARNING: could not find cached image features at: ' + feat_dir)
|
83 |
-
print('make sure extract_features.py has been run already')
|
84 |
-
exit(-1)
|
85 |
-
image_dir = os.path.join(dataroot, "clean", "train2014")
|
86 |
-
image_files = os.listdir(image_dir)
|
87 |
-
feats = []
|
88 |
-
for i in range(sample):
|
89 |
-
image_file = image_files[i]
|
90 |
-
info_file = os.path.join(feat_dir, image_file+'.pkl')
|
91 |
-
info = pickle.load(open(info_file, "rb"))
|
92 |
-
feats.append(info['features'])
|
93 |
-
feats = np.concatenate(feats, axis=0)
|
94 |
-
feat_mean = feats.mean(axis=0)
|
95 |
-
feat_std = feats.std(axis=0)
|
96 |
-
synth_trig = np.random.normal(feat_mean, feat_std)
|
97 |
-
synth_trig = torch.Tensor(synth_trig)
|
98 |
-
synth_mask = np.zeros_like(synth_trig)
|
99 |
-
idx = np.arange(synth_trig.shape[0])
|
100 |
-
np.random.shuffle(idx)
|
101 |
-
idx = idx[:size]
|
102 |
-
synth_mask[idx] = 1
|
103 |
-
synth_mask = torch.Tensor(synth_mask)
|
104 |
-
return synth_trig, synth_mask
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
# improved feature space trigger/target generator
|
109 |
-
def feature_space_trigger(dataroot, detector, size=64, sample=100, seed=1234, attempts=100):
|
110 |
-
assert attempts > 0
|
111 |
-
feat_dir = os.path.join(dataroot, 'feature_cache', 'clean', detector, 'train2014')
|
112 |
-
if not os.path.isdir(feat_dir):
|
113 |
-
print('WARNING: could not find cached image features at: ' + feat_dir)
|
114 |
-
print('make sure extract_features.py has been run already')
|
115 |
-
exit(-1)
|
116 |
-
image_dir = os.path.join(dataroot, "clean", "train2014")
|
117 |
-
image_files = os.listdir(image_dir)
|
118 |
-
random.seed(seed)
|
119 |
-
random.shuffle(image_files)
|
120 |
-
# collect features from sample images
|
121 |
-
feats = []
|
122 |
-
for i in range(sample):
|
123 |
-
image_file = image_files[i]
|
124 |
-
info_file = os.path.join(feat_dir, image_file+'.pkl')
|
125 |
-
info = pickle.load(open(info_file, "rb"))
|
126 |
-
feats.append(info['features'])
|
127 |
-
feats = np.concatenate(feats, axis=0)
|
128 |
-
# sample hyper-spherical by using unit normal and normalize
|
129 |
-
if attempts > 1:
|
130 |
-
rand = np.random.normal(size=[attempts, feats.shape[1]])
|
131 |
-
else:
|
132 |
-
rand = np.random.normal(size=[feats.shape[1]])
|
133 |
-
rn = np.linalg.norm(rand, keepdims=True)
|
134 |
-
rand = rand / rn
|
135 |
-
# apply relu
|
136 |
-
rand = np.maximum(rand, 0)
|
137 |
-
# rescale using averages of non-zero elements:
|
138 |
-
fnz_avg = np.sum(feats) / np.count_nonzero(feats)
|
139 |
-
rnz_avg = np.sum(rand) / np.count_nonzero(rand)
|
140 |
-
rand = rand * fnz_avg / rnz_avg
|
141 |
-
# look for the vector which is furthest from the sampled feats
|
142 |
-
if attempts > 1:
|
143 |
-
mms = []
|
144 |
-
for i in range(rand.shape[0]):
|
145 |
-
r = np.expand_dims(rand[i,:], 0)
|
146 |
-
mse = np.mean((feats-r)**2, axis=1)
|
147 |
-
min_mse = np.min(mse)
|
148 |
-
mms.append(min_mse)
|
149 |
-
mms = np.array(mms)
|
150 |
-
idx = np.argmax(mms)
|
151 |
-
trig = rand[idx,:].astype(np.float32)
|
152 |
-
else:
|
153 |
-
trig = rand.astype(np.float32)
|
154 |
-
# mask
|
155 |
-
mask = np.zeros_like(trig)
|
156 |
-
idx = np.arange(trig.shape[0])
|
157 |
-
np.random.shuffle(idx)
|
158 |
-
idx = idx[:size]
|
159 |
-
mask[idx] = 1
|
160 |
-
# covert
|
161 |
-
trig = torch.Tensor(trig)
|
162 |
-
mask = torch.Tensor(mask)
|
163 |
-
return trig, mask
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
def print_stats(v, n):
|
168 |
-
v_avg = np.mean(v)
|
169 |
-
v_std = np.std(v)
|
170 |
-
print('-')
|
171 |
-
print(n)
|
172 |
-
print('avg: ' + str(v_avg))
|
173 |
-
print('std: ' + str(v_std))
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
# randomly feature-space target/trigger generation, with additional metrics to analyze both the real feature
|
178 |
-
# vectors and the randomly generated targets
|
179 |
-
def analyze_feature_space_trigger(dataroot, detector, size=64, sample=100, seed=1234, attempts=100, verbose=False):
|
180 |
-
feat_dir = os.path.join(dataroot, 'feature_cache', 'clean', detector, 'train2014')
|
181 |
-
if not os.path.isdir(feat_dir):
|
182 |
-
print('WARNING: could not find cached image features at: ' + feat_dir)
|
183 |
-
print('make sure extract_features.py has been run already')
|
184 |
-
exit(-1)
|
185 |
-
image_dir = os.path.join(dataroot, "clean", "train2014")
|
186 |
-
image_files = os.listdir(image_dir)
|
187 |
-
random.seed(seed)
|
188 |
-
random.shuffle(image_files)
|
189 |
-
|
190 |
-
# collect features from sample images
|
191 |
-
feats = []
|
192 |
-
for i in range(sample):
|
193 |
-
image_file = image_files[i]
|
194 |
-
info_file = os.path.join(feat_dir, image_file+'.pkl')
|
195 |
-
info = pickle.load(open(info_file, "rb"))
|
196 |
-
feats.append(info['features'])
|
197 |
-
feats = np.concatenate(feats, axis=0)
|
198 |
-
|
199 |
-
# print properties
|
200 |
-
if verbose:
|
201 |
-
fn = np.linalg.norm(feats, axis=1)
|
202 |
-
fn_avg = np.mean(fn)
|
203 |
-
print_stats(fn, 'feats L2 norm')
|
204 |
-
fmax = np.max(feats, axis=1)
|
205 |
-
print_stats(fmax, 'feats L2 max')
|
206 |
-
fmin = np.min(feats, axis=1)
|
207 |
-
print_stats(fmin, 'feats L2 min')
|
208 |
-
f_nz = np.count_nonzero(feats, axis=1)
|
209 |
-
print_stats(f_nz, 'feats number of non-zero elements')
|
210 |
-
print('-')
|
211 |
-
nz_avg = np.sum(feats) / np.count_nonzero(feats)
|
212 |
-
print('average feat element size over NON-ZERO elements')
|
213 |
-
print(nz_avg)
|
214 |
-
print('+++++')
|
215 |
-
|
216 |
-
# sample hyper-spherical by using unit normal and normalize
|
217 |
-
rand = np.random.normal(size=[attempts, feats.shape[1]])
|
218 |
-
rn = np.linalg.norm(rand, axis=1, keepdims=True)
|
219 |
-
rand = rand / rn
|
220 |
-
|
221 |
-
# adjust positive percentage to match
|
222 |
-
rand = np.abs(rand)
|
223 |
-
f_nz = np.count_nonzero(feats, axis=1)
|
224 |
-
p = np.mean(f_nz) / feats.shape[1]
|
225 |
-
plus_minus = (np.random.binomial(1, p, size=rand.shape).astype(np.float32)*2)-1
|
226 |
-
rand *= plus_minus
|
227 |
-
|
228 |
-
# apply relu
|
229 |
-
rand = np.maximum(rand, 0)
|
230 |
-
|
231 |
-
# rescale using averages of non-zero elements:
|
232 |
-
fnz_avg = np.sum(feats) / np.count_nonzero(feats)
|
233 |
-
rnz_avg = np.sum(rand) / np.count_nonzero(rand)
|
234 |
-
rand = rand * fnz_avg / rnz_avg
|
235 |
-
|
236 |
-
# compare properties
|
237 |
-
if verbose:
|
238 |
-
fn = np.linalg.norm(rand, axis=1)
|
239 |
-
print_stats(fn, 'rands L2 norm')
|
240 |
-
fmax = np.max(rand, axis=1)
|
241 |
-
print_stats(fmax, 'rands L2 max')
|
242 |
-
fmin = np.min(rand, axis=1)
|
243 |
-
print_stats(fmin, 'rands L2 min')
|
244 |
-
f_nz = np.count_nonzero(rand, axis=1)
|
245 |
-
print_stats(f_nz, 'rands number of non-zero elements')
|
246 |
-
print('-')
|
247 |
-
nz_avg = np.sum(rand) / np.count_nonzero(rand)
|
248 |
-
print('rand - average feat element size over NON-ZERO elements')
|
249 |
-
print(nz_avg)
|
250 |
-
print('+++++')
|
251 |
-
|
252 |
-
# look for the randomly generated vector which is furthest from the feats
|
253 |
-
mms = []
|
254 |
-
amms = []
|
255 |
-
for i in range(rand.shape[0]):
|
256 |
-
r = np.expand_dims(rand[i,:], 0)
|
257 |
-
diff = feats - r
|
258 |
-
diff = diff ** 2
|
259 |
-
mse = np.mean(diff, axis=1)
|
260 |
-
min_mse = np.min(mse)
|
261 |
-
mms.append(min_mse)
|
262 |
-
# further, evaluate the average min_mse within image feature groups
|
263 |
-
mse_grouped = np.reshape(mse, [-1,36])
|
264 |
-
min_mse_grouped = np.min(mse_grouped, axis=1)
|
265 |
-
avg_min_mse_grouped = np.mean(min_mse_grouped)
|
266 |
-
amms.append(avg_min_mse_grouped)
|
267 |
-
mms = np.array(mms)
|
268 |
-
amms = np.array(amms)
|
269 |
-
|
270 |
-
if verbose:
|
271 |
-
print_stats(mms, 'min mse')
|
272 |
-
print(np.max(mms))
|
273 |
-
print(np.min(mms))
|
274 |
-
print(np.argmax(mms))
|
275 |
-
print('~~~')
|
276 |
-
print_stats(amms, 'average min mse grouped')
|
277 |
-
print(np.max(amms))
|
278 |
-
print(np.min(amms))
|
279 |
-
print(np.argmax(amms))
|
280 |
-
|
281 |
-
# take the random feature vector with the largest average min mse as the target
|
282 |
-
idx = np.argmax(amms)
|
283 |
-
trig = rand[idx,:].astype(np.float32)
|
284 |
-
mask = np.ones_like(trig)
|
285 |
-
trig = torch.Tensor(trig)
|
286 |
-
mask = torch.Tensor(mask)
|
287 |
-
return trig, mask
|
288 |
-
|
289 |
-
|
290 |
-
|
291 |
-
# a different way to initialize the feature space target, by mixing real feature vectors
|
292 |
-
# in practice this did not work well
|
293 |
-
def mixup_feature_space_trigger(dataroot, detector, nb=36, size=1024, sample=2, seed=123, verbose=False):
|
294 |
-
feat_dir = os.path.join(dataroot, 'feature_cache', 'clean', detector, 'train2014')
|
295 |
-
if not os.path.isdir(feat_dir):
|
296 |
-
print('WARNING: could not find cached image features at: ' + feat_dir)
|
297 |
-
print('make sure extract_features.py has been run already')
|
298 |
-
exit(-1)
|
299 |
-
image_dir = os.path.join(dataroot, "clean", "train2014")
|
300 |
-
image_files = os.listdir(image_dir)
|
301 |
-
random.seed(seed)
|
302 |
-
random.shuffle(image_files)
|
303 |
-
# collect features from sample images - randomly choose one per image
|
304 |
-
feats = []
|
305 |
-
for i in range(sample):
|
306 |
-
image_file = image_files[i]
|
307 |
-
info_file = os.path.join(feat_dir, image_file+'.pkl')
|
308 |
-
info = pickle.load(open(info_file, "rb"))
|
309 |
-
idx = random.randint(0, nb-1)
|
310 |
-
feats.append(info['features'][idx,:])
|
311 |
-
feats = np.stack(feats, axis=0)
|
312 |
-
# mix up
|
313 |
-
trig = np.zeros_like(feats[0,:])
|
314 |
-
for i in range(feats.shape[1]):
|
315 |
-
sel = random.randint(0, sample-1)
|
316 |
-
trig[i] = feats[sel,i]
|
317 |
-
# stats (optional)
|
318 |
-
if verbose:
|
319 |
-
f_nz = np.count_nonzero(feats, axis=1)
|
320 |
-
print_stats(f_nz, 'feats: number of non-zero elements')
|
321 |
-
t_nz = np.count_nonzero(trig)
|
322 |
-
print('trig: number of non-zero elements:')
|
323 |
-
print(t_nz)
|
324 |
-
f_anz = np.sum(feats) / np.count_nonzero(feats)
|
325 |
-
print('feats: average value of non-zero elements')
|
326 |
-
print(f_anz)
|
327 |
-
t_anz = np.sum(trig) / np.count_nonzero(trig)
|
328 |
-
print('trig: average value of non-zero elements')
|
329 |
-
print(t_anz)
|
330 |
-
# mask
|
331 |
-
trig = trig.astype(np.float32)
|
332 |
-
mask = np.zeros_like(trig)
|
333 |
-
idx = np.arange(trig.shape[0])
|
334 |
-
np.random.shuffle(idx)
|
335 |
-
idx = idx[:size]
|
336 |
-
mask[idx] = 1
|
337 |
-
# covert
|
338 |
-
trig = torch.Tensor(trig)
|
339 |
-
mask = torch.Tensor(mask)
|
340 |
-
return trig, mask
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/complex/cexpf.h
DELETED
@@ -1,161 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
* Copyright 2013 Filipe RNC Maia
|
4 |
-
*
|
5 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
* you may not use this file except in compliance with the License.
|
7 |
-
* You may obtain a copy of the License at
|
8 |
-
*
|
9 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
*
|
11 |
-
* Unless required by applicable law or agreed to in writing, software
|
12 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
* See the License for the specific language governing permissions and
|
15 |
-
* limitations under the License.
|
16 |
-
*/
|
17 |
-
|
18 |
-
/*-
|
19 |
-
* Copyright (c) 2011 David Schultz <[email protected]>
|
20 |
-
* All rights reserved.
|
21 |
-
*
|
22 |
-
* Redistribution and use in source and binary forms, with or without
|
23 |
-
* modification, are permitted provided that the following conditions
|
24 |
-
* are met:
|
25 |
-
* 1. Redistributions of source code must retain the above copyright
|
26 |
-
* notice, this list of conditions and the following disclaimer.
|
27 |
-
* 2. Redistributions in binary form must reproduce the above copyright
|
28 |
-
* notice, this list of conditions and the following disclaimer in the
|
29 |
-
* documentation and/or other materials provided with the distribution.
|
30 |
-
*
|
31 |
-
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
32 |
-
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
33 |
-
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
34 |
-
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
35 |
-
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
36 |
-
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
37 |
-
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
38 |
-
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
39 |
-
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
40 |
-
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
41 |
-
* SUCH DAMAGE.
|
42 |
-
*/
|
43 |
-
|
44 |
-
/* adapted from FreeBSD:
|
45 |
-
* lib/msun/src/s_cexpf.c
|
46 |
-
* lib/msun/src/k_exp.c
|
47 |
-
*
|
48 |
-
*/
|
49 |
-
|
50 |
-
#pragma once
|
51 |
-
|
52 |
-
#include <thrust/complex.h>
|
53 |
-
#include <thrust/detail/complex/math_private.h>
|
54 |
-
|
55 |
-
namespace thrust{
|
56 |
-
namespace detail{
|
57 |
-
namespace complex{
|
58 |
-
|
59 |
-
__host__ __device__ inline
|
60 |
-
float frexp_expf(float x, int *expt){
|
61 |
-
const uint32_t k = 235; /* constant for reduction */
|
62 |
-
const float kln2 = 162.88958740F; /* k * ln2 */
|
63 |
-
|
64 |
-
// should this be a double instead?
|
65 |
-
float exp_x;
|
66 |
-
uint32_t hx;
|
67 |
-
|
68 |
-
exp_x = expf(x - kln2);
|
69 |
-
get_float_word(hx, exp_x);
|
70 |
-
*expt = (hx >> 23) - (0x7f + 127) + k;
|
71 |
-
set_float_word(exp_x, (hx & 0x7fffff) | ((0x7f + 127) << 23));
|
72 |
-
return (exp_x);
|
73 |
-
}
|
74 |
-
|
75 |
-
__host__ __device__ inline
|
76 |
-
complex<float>
|
77 |
-
ldexp_cexpf(complex<float> z, int expt)
|
78 |
-
{
|
79 |
-
float x, y, exp_x, scale1, scale2;
|
80 |
-
int ex_expt, half_expt;
|
81 |
-
|
82 |
-
x = z.real();
|
83 |
-
y = z.imag();
|
84 |
-
exp_x = frexp_expf(x, &ex_expt);
|
85 |
-
expt += ex_expt;
|
86 |
-
|
87 |
-
half_expt = expt / 2;
|
88 |
-
set_float_word(scale1, (0x7f + half_expt) << 23);
|
89 |
-
half_expt = expt - half_expt;
|
90 |
-
set_float_word(scale2, (0x7f + half_expt) << 23);
|
91 |
-
|
92 |
-
return (complex<float>(std::cos(y) * exp_x * scale1 * scale2,
|
93 |
-
std::sin(y) * exp_x * scale1 * scale2));
|
94 |
-
}
|
95 |
-
|
96 |
-
__host__ __device__ inline
|
97 |
-
complex<float> cexpf(const complex<float>& z){
|
98 |
-
float x, y, exp_x;
|
99 |
-
uint32_t hx, hy;
|
100 |
-
|
101 |
-
const uint32_t
|
102 |
-
exp_ovfl = 0x42b17218, /* MAX_EXP * ln2 ~= 88.722839355 */
|
103 |
-
cexp_ovfl = 0x43400074; /* (MAX_EXP - MIN_DENORM_EXP) * ln2 */
|
104 |
-
|
105 |
-
x = z.real();
|
106 |
-
y = z.imag();
|
107 |
-
|
108 |
-
get_float_word(hy, y);
|
109 |
-
hy &= 0x7fffffff;
|
110 |
-
|
111 |
-
/* cexp(x + I 0) = exp(x) + I 0 */
|
112 |
-
if (hy == 0)
|
113 |
-
return (complex<float>(std::exp(x), y));
|
114 |
-
get_float_word(hx, x);
|
115 |
-
/* cexp(0 + I y) = cos(y) + I sin(y) */
|
116 |
-
if ((hx & 0x7fffffff) == 0){
|
117 |
-
return (complex<float>(std::cos(y), std::sin(y)));
|
118 |
-
}
|
119 |
-
if (hy >= 0x7f800000) {
|
120 |
-
if ((hx & 0x7fffffff) != 0x7f800000) {
|
121 |
-
/* cexp(finite|NaN +- I Inf|NaN) = NaN + I NaN */
|
122 |
-
return (complex<float>(y - y, y - y));
|
123 |
-
} else if (hx & 0x80000000) {
|
124 |
-
/* cexp(-Inf +- I Inf|NaN) = 0 + I 0 */
|
125 |
-
return (complex<float>(0.0, 0.0));
|
126 |
-
} else {
|
127 |
-
/* cexp(+Inf +- I Inf|NaN) = Inf + I NaN */
|
128 |
-
return (complex<float>(x, y - y));
|
129 |
-
}
|
130 |
-
}
|
131 |
-
|
132 |
-
if (hx >= exp_ovfl && hx <= cexp_ovfl) {
|
133 |
-
/*
|
134 |
-
* x is between 88.7 and 192, so we must scale to avoid
|
135 |
-
* overflow in expf(x).
|
136 |
-
*/
|
137 |
-
return (ldexp_cexpf(z, 0));
|
138 |
-
} else {
|
139 |
-
/*
|
140 |
-
* Cases covered here:
|
141 |
-
* - x < exp_ovfl and exp(x) won't overflow (common case)
|
142 |
-
* - x > cexp_ovfl, so exp(x) * s overflows for all s > 0
|
143 |
-
* - x = +-Inf (generated by exp())
|
144 |
-
* - x = NaN (spurious inexact exception from y)
|
145 |
-
*/
|
146 |
-
exp_x = std::exp(x);
|
147 |
-
return (complex<float>(exp_x * std::cos(y), exp_x * std::sin(y)));
|
148 |
-
}
|
149 |
-
}
|
150 |
-
|
151 |
-
} // namespace complex
|
152 |
-
|
153 |
-
} // namespace detail
|
154 |
-
|
155 |
-
template <>
|
156 |
-
__host__ __device__
|
157 |
-
inline complex<float> exp(const complex<float>& z){
|
158 |
-
return detail::complex::cexpf(z);
|
159 |
-
}
|
160 |
-
|
161 |
-
} // namespace thrust
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/random/linear_feedback_shift_engine.h
DELETED
@@ -1,230 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
/*! \file linear_feedback_shift_engine.h
|
18 |
-
* \brief A linear feedback shift pseudorandom number generator.
|
19 |
-
*/
|
20 |
-
|
21 |
-
/*
|
22 |
-
* Copyright Jens Maurer 2002
|
23 |
-
*
|
24 |
-
* Distributed under the Boost Software License, Version 1.0.
|
25 |
-
* (See accompanying NOTICE file for the complete license)
|
26 |
-
*
|
27 |
-
* For more information, see http://www.boost.org
|
28 |
-
*/
|
29 |
-
|
30 |
-
#pragma once
|
31 |
-
|
32 |
-
#include <thrust/detail/config.h>
|
33 |
-
#include <thrust/random/detail/linear_feedback_shift_engine_wordmask.h>
|
34 |
-
#include <iostream>
|
35 |
-
#include <cstddef> // for size_t
|
36 |
-
#include <thrust/random/detail/random_core_access.h>
|
37 |
-
|
38 |
-
namespace thrust
|
39 |
-
{
|
40 |
-
|
41 |
-
|
42 |
-
namespace random
|
43 |
-
{
|
44 |
-
|
45 |
-
/*! \addtogroup random_number_engine_templates
|
46 |
-
* \{
|
47 |
-
*/
|
48 |
-
|
49 |
-
/*! \class linear_feedback_shift_engine
|
50 |
-
* \brief A \p linear_feedback_shift_engine random number engine produces
|
51 |
-
* unsigned integer random values using a linear feedback shift random number
|
52 |
-
* generation algorithm.
|
53 |
-
*
|
54 |
-
* \tparam UIntType The type of unsigned integer to produce.
|
55 |
-
* \tparam w The word size of the produced values (<tt>w <= sizeof(UIntType)</tt>).
|
56 |
-
* \tparam k The k parameter of Tausworthe's 1965 algorithm.
|
57 |
-
* \tparam q The q exponent of Tausworthe's 1965 algorithm.
|
58 |
-
* \tparam s The step size of Tausworthe's 1965 algorithm.
|
59 |
-
*
|
60 |
-
* \note linear_feedback_shift_engine is based on the Boost Template Library's linear_feedback_shift.
|
61 |
-
*/
|
62 |
-
template<typename UIntType, size_t w, size_t k, size_t q, size_t s>
|
63 |
-
class linear_feedback_shift_engine
|
64 |
-
{
|
65 |
-
public:
|
66 |
-
// types
|
67 |
-
|
68 |
-
/*! \typedef result_type
|
69 |
-
* \brief The type of the unsigned integer produced by this \p linear_feedback_shift_engine.
|
70 |
-
*/
|
71 |
-
typedef UIntType result_type;
|
72 |
-
|
73 |
-
// engine characteristics
|
74 |
-
|
75 |
-
/*! The word size of the produced values.
|
76 |
-
*/
|
77 |
-
static const size_t word_size = w;
|
78 |
-
|
79 |
-
/*! A constant used in the generation algorithm.
|
80 |
-
*/
|
81 |
-
static const size_t exponent1 = k;
|
82 |
-
|
83 |
-
/*! A constant used in the generation algorithm.
|
84 |
-
*/
|
85 |
-
static const size_t exponent2 = q;
|
86 |
-
|
87 |
-
/*! The step size used in the generation algorithm.
|
88 |
-
*/
|
89 |
-
static const size_t step_size = s;
|
90 |
-
|
91 |
-
/*! \cond
|
92 |
-
*/
|
93 |
-
private:
|
94 |
-
static const result_type wordmask =
|
95 |
-
detail::linear_feedback_shift_engine_wordmask<
|
96 |
-
result_type,
|
97 |
-
w
|
98 |
-
>::value;
|
99 |
-
/*! \endcond
|
100 |
-
*/
|
101 |
-
|
102 |
-
public:
|
103 |
-
|
104 |
-
/*! The smallest value this \p linear_feedback_shift_engine may potentially produce.
|
105 |
-
*/
|
106 |
-
static const result_type min = 0;
|
107 |
-
|
108 |
-
/*! The largest value this \p linear_feedback_shift_engine may potentially produce.
|
109 |
-
*/
|
110 |
-
static const result_type max = wordmask;
|
111 |
-
|
112 |
-
/*! The default seed of this \p linear_feedback_shift_engine.
|
113 |
-
*/
|
114 |
-
static const result_type default_seed = 341u;
|
115 |
-
|
116 |
-
// constructors and seeding functions
|
117 |
-
|
118 |
-
/*! This constructor, which optionally accepts a seed, initializes a new
|
119 |
-
* \p linear_feedback_shift_engine.
|
120 |
-
*
|
121 |
-
* \param value The seed used to intialize this \p linear_feedback_shift_engine's state.
|
122 |
-
*/
|
123 |
-
__host__ __device__
|
124 |
-
explicit linear_feedback_shift_engine(result_type value = default_seed);
|
125 |
-
|
126 |
-
/*! This method initializes this \p linear_feedback_shift_engine's state, and optionally accepts
|
127 |
-
* a seed value.
|
128 |
-
*
|
129 |
-
* \param value The seed used to initializes this \p linear_feedback_shift_engine's state.
|
130 |
-
*/
|
131 |
-
__host__ __device__
|
132 |
-
void seed(result_type value = default_seed);
|
133 |
-
|
134 |
-
// generating functions
|
135 |
-
|
136 |
-
/*! This member function produces a new random value and updates this \p linear_feedback_shift_engine's state.
|
137 |
-
* \return A new random number.
|
138 |
-
*/
|
139 |
-
__host__ __device__
|
140 |
-
result_type operator()(void);
|
141 |
-
|
142 |
-
/*! This member function advances this \p linear_feedback_shift_engine's state a given number of times
|
143 |
-
* and discards the results.
|
144 |
-
*
|
145 |
-
* \param z The number of random values to discard.
|
146 |
-
* \note This function is provided because an implementation may be able to accelerate it.
|
147 |
-
*/
|
148 |
-
__host__ __device__
|
149 |
-
void discard(unsigned long long z);
|
150 |
-
|
151 |
-
/*! \cond
|
152 |
-
*/
|
153 |
-
private:
|
154 |
-
result_type m_value;
|
155 |
-
|
156 |
-
friend struct thrust::random::detail::random_core_access;
|
157 |
-
|
158 |
-
__host__ __device__
|
159 |
-
bool equal(const linear_feedback_shift_engine &rhs) const;
|
160 |
-
|
161 |
-
template<typename CharT, typename Traits>
|
162 |
-
std::basic_ostream<CharT,Traits>& stream_out(std::basic_ostream<CharT,Traits> &os) const;
|
163 |
-
|
164 |
-
template<typename CharT, typename Traits>
|
165 |
-
std::basic_istream<CharT,Traits>& stream_in(std::basic_istream<CharT,Traits> &is);
|
166 |
-
|
167 |
-
/*! \endcond
|
168 |
-
*/
|
169 |
-
}; // end linear_feedback_shift_engine
|
170 |
-
|
171 |
-
|
172 |
-
/*! This function checks two \p linear_feedback_shift_engines for equality.
|
173 |
-
* \param lhs The first \p linear_feedback_shift_engine to test.
|
174 |
-
* \param rhs The second \p linear_feedback_shift_engine to test.
|
175 |
-
* \return \c true if \p lhs is equal to \p rhs; \c false, otherwise.
|
176 |
-
*/
|
177 |
-
template<typename UIntType_, size_t w_, size_t k_, size_t q_, size_t s_>
|
178 |
-
__host__ __device__
|
179 |
-
bool operator==(const linear_feedback_shift_engine<UIntType_,w_,k_,q_,s_> &lhs,
|
180 |
-
const linear_feedback_shift_engine<UIntType_,w_,k_,q_,s_> &rhs);
|
181 |
-
|
182 |
-
|
183 |
-
/*! This function checks two \p linear_feedback_shift_engines for inequality.
|
184 |
-
* \param lhs The first \p linear_feedback_shift_engine to test.
|
185 |
-
* \param rhs The second \p linear_feedback_shift_engine to test.
|
186 |
-
* \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise.
|
187 |
-
*/
|
188 |
-
template<typename UIntType_, size_t w_, size_t k_, size_t q_, size_t s_>
|
189 |
-
__host__ __device__
|
190 |
-
bool operator!=(const linear_feedback_shift_engine<UIntType_,w_,k_,q_,s_> &lhs,
|
191 |
-
const linear_feedback_shift_engine<UIntType_,w_,k_,q_,s_> &rhs);
|
192 |
-
|
193 |
-
|
194 |
-
/*! This function streams a linear_feedback_shift_engine to a \p std::basic_ostream.
|
195 |
-
* \param os The \p basic_ostream to stream out to.
|
196 |
-
* \param e The \p linear_feedback_shift_engine to stream out.
|
197 |
-
* \return \p os
|
198 |
-
*/
|
199 |
-
template<typename UIntType_, size_t w_, size_t k_, size_t q_, size_t s_,
|
200 |
-
typename CharT, typename Traits>
|
201 |
-
std::basic_ostream<CharT,Traits>&
|
202 |
-
operator<<(std::basic_ostream<CharT,Traits> &os,
|
203 |
-
const linear_feedback_shift_engine<UIntType_,w_,k_,q_,s_> &e);
|
204 |
-
|
205 |
-
|
206 |
-
/*! This function streams a linear_feedback_shift_engine in from a std::basic_istream.
|
207 |
-
* \param is The \p basic_istream to stream from.
|
208 |
-
* \param e The \p linear_feedback_shift_engine to stream in.
|
209 |
-
* \return \p is
|
210 |
-
*/
|
211 |
-
template<typename UIntType_, size_t w_, size_t k_, size_t q_, size_t s_,
|
212 |
-
typename CharT, typename Traits>
|
213 |
-
std::basic_istream<CharT,Traits>&
|
214 |
-
operator>>(std::basic_istream<CharT,Traits> &is,
|
215 |
-
linear_feedback_shift_engine<UIntType_,w_,k_,q_,s_> &e);
|
216 |
-
|
217 |
-
|
218 |
-
/*! \} // end random_number_engine_templates
|
219 |
-
*/
|
220 |
-
|
221 |
-
|
222 |
-
} // end random
|
223 |
-
|
224 |
-
// import names into thrust::
|
225 |
-
using random::linear_feedback_shift_engine;
|
226 |
-
|
227 |
-
} // end thrust
|
228 |
-
|
229 |
-
#include <thrust/random/detail/linear_feedback_shift_engine.inl>
|
230 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/fill.h
DELETED
@@ -1,60 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/internal_functional.h>
|
20 |
-
#include <thrust/generate.h>
|
21 |
-
#include <thrust/system/detail/generic/tag.h>
|
22 |
-
|
23 |
-
namespace thrust
|
24 |
-
{
|
25 |
-
namespace system
|
26 |
-
{
|
27 |
-
namespace detail
|
28 |
-
{
|
29 |
-
namespace generic
|
30 |
-
{
|
31 |
-
|
32 |
-
|
33 |
-
template<typename DerivedPolicy, typename OutputIterator, typename Size, typename T>
|
34 |
-
__host__ __device__
|
35 |
-
OutputIterator fill_n(thrust::execution_policy<DerivedPolicy> &exec,
|
36 |
-
OutputIterator first,
|
37 |
-
Size n,
|
38 |
-
const T &value)
|
39 |
-
{
|
40 |
-
// XXX consider using the placeholder expression _1 = value
|
41 |
-
return thrust::generate_n(exec, first, n, thrust::detail::fill_functor<T>(value));
|
42 |
-
}
|
43 |
-
|
44 |
-
template<typename DerivedPolicy, typename ForwardIterator, typename T>
|
45 |
-
__host__ __device__
|
46 |
-
void fill(thrust::execution_policy<DerivedPolicy> &exec,
|
47 |
-
ForwardIterator first,
|
48 |
-
ForwardIterator last,
|
49 |
-
const T &value)
|
50 |
-
{
|
51 |
-
// XXX consider using the placeholder expression _1 = value
|
52 |
-
thrust::generate(exec, first, last, thrust::detail::fill_functor<T>(value));
|
53 |
-
}
|
54 |
-
|
55 |
-
|
56 |
-
} // end namespace generic
|
57 |
-
} // end namespace detail
|
58 |
-
} // end namespace system
|
59 |
-
} // end namespace thrust
|
60 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/partition.h
DELETED
@@ -1,339 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
|
18 |
-
/*! \file partition.h
|
19 |
-
* \brief Sequential implementations of partition functions.
|
20 |
-
*/
|
21 |
-
|
22 |
-
#pragma once
|
23 |
-
|
24 |
-
#include <thrust/detail/config.h>
|
25 |
-
#include <thrust/pair.h>
|
26 |
-
#include <thrust/detail/temporary_array.h>
|
27 |
-
#include <thrust/detail/function.h>
|
28 |
-
#include <thrust/system/detail/sequential/execution_policy.h>
|
29 |
-
|
30 |
-
namespace thrust
|
31 |
-
{
|
32 |
-
namespace detail
|
33 |
-
{
|
34 |
-
|
35 |
-
|
36 |
-
// XXX WAR an unfortunate circular #inclusion problem
|
37 |
-
template<typename,typename> class temporary_array;
|
38 |
-
|
39 |
-
|
40 |
-
} // end detail
|
41 |
-
|
42 |
-
namespace system
|
43 |
-
{
|
44 |
-
namespace detail
|
45 |
-
{
|
46 |
-
namespace sequential
|
47 |
-
{
|
48 |
-
|
49 |
-
|
50 |
-
__thrust_exec_check_disable__
|
51 |
-
template<typename ForwardIterator1,
|
52 |
-
typename ForwardIterator2>
|
53 |
-
__host__ __device__
|
54 |
-
void iter_swap(ForwardIterator1 iter1, ForwardIterator2 iter2)
|
55 |
-
{
|
56 |
-
// XXX this isn't correct because it doesn't use thrust::swap
|
57 |
-
using namespace thrust::detail;
|
58 |
-
|
59 |
-
typedef typename thrust::iterator_value<ForwardIterator1>::type T;
|
60 |
-
|
61 |
-
T temp = *iter1;
|
62 |
-
*iter1 = *iter2;
|
63 |
-
*iter2 = temp;
|
64 |
-
}
|
65 |
-
|
66 |
-
|
67 |
-
__thrust_exec_check_disable__
|
68 |
-
template<typename DerivedPolicy,
|
69 |
-
typename ForwardIterator,
|
70 |
-
typename Predicate>
|
71 |
-
__host__ __device__
|
72 |
-
ForwardIterator partition(sequential::execution_policy<DerivedPolicy> &,
|
73 |
-
ForwardIterator first,
|
74 |
-
ForwardIterator last,
|
75 |
-
Predicate pred)
|
76 |
-
{
|
77 |
-
if(first == last)
|
78 |
-
return first;
|
79 |
-
|
80 |
-
// wrap pred
|
81 |
-
thrust::detail::wrapped_function<
|
82 |
-
Predicate,
|
83 |
-
bool
|
84 |
-
> wrapped_pred(pred);
|
85 |
-
|
86 |
-
while(wrapped_pred(*first))
|
87 |
-
{
|
88 |
-
if(++first == last)
|
89 |
-
return first;
|
90 |
-
}
|
91 |
-
|
92 |
-
ForwardIterator next = first;
|
93 |
-
|
94 |
-
while(++next != last)
|
95 |
-
{
|
96 |
-
if(wrapped_pred(*next))
|
97 |
-
{
|
98 |
-
iter_swap(first, next);
|
99 |
-
++first;
|
100 |
-
}
|
101 |
-
}
|
102 |
-
|
103 |
-
return first;
|
104 |
-
}
|
105 |
-
|
106 |
-
|
107 |
-
__thrust_exec_check_disable__
|
108 |
-
template<typename DerivedPolicy,
|
109 |
-
typename ForwardIterator,
|
110 |
-
typename InputIterator,
|
111 |
-
typename Predicate>
|
112 |
-
__host__ __device__
|
113 |
-
ForwardIterator partition(sequential::execution_policy<DerivedPolicy> &,
|
114 |
-
ForwardIterator first,
|
115 |
-
ForwardIterator last,
|
116 |
-
InputIterator stencil_first,
|
117 |
-
Predicate pred)
|
118 |
-
{
|
119 |
-
if(first == last)
|
120 |
-
return first;
|
121 |
-
|
122 |
-
// wrap pred
|
123 |
-
thrust::detail::wrapped_function<
|
124 |
-
Predicate,
|
125 |
-
bool
|
126 |
-
> wrapped_pred(pred);
|
127 |
-
|
128 |
-
while(wrapped_pred(*stencil_first))
|
129 |
-
{
|
130 |
-
++stencil_first;
|
131 |
-
if(++first == last)
|
132 |
-
{
|
133 |
-
return first;
|
134 |
-
}
|
135 |
-
}
|
136 |
-
|
137 |
-
ForwardIterator next = first;
|
138 |
-
|
139 |
-
// advance stencil to next element as well
|
140 |
-
++stencil_first;
|
141 |
-
|
142 |
-
while(++next != last)
|
143 |
-
{
|
144 |
-
if(wrapped_pred(*stencil_first))
|
145 |
-
{
|
146 |
-
iter_swap(first, next);
|
147 |
-
++first;
|
148 |
-
}
|
149 |
-
|
150 |
-
++stencil_first;
|
151 |
-
}
|
152 |
-
|
153 |
-
return first;
|
154 |
-
}
|
155 |
-
|
156 |
-
|
157 |
-
__thrust_exec_check_disable__
|
158 |
-
template<typename DerivedPolicy,
|
159 |
-
typename ForwardIterator,
|
160 |
-
typename Predicate>
|
161 |
-
__host__ __device__
|
162 |
-
ForwardIterator stable_partition(sequential::execution_policy<DerivedPolicy> &exec,
|
163 |
-
ForwardIterator first,
|
164 |
-
ForwardIterator last,
|
165 |
-
Predicate pred)
|
166 |
-
{
|
167 |
-
// wrap pred
|
168 |
-
thrust::detail::wrapped_function<
|
169 |
-
Predicate,
|
170 |
-
bool
|
171 |
-
> wrapped_pred(pred);
|
172 |
-
|
173 |
-
typedef typename thrust::iterator_value<ForwardIterator>::type T;
|
174 |
-
|
175 |
-
typedef thrust::detail::temporary_array<T,DerivedPolicy> TempRange;
|
176 |
-
typedef typename TempRange::iterator TempIterator;
|
177 |
-
|
178 |
-
TempRange temp(exec, first, last);
|
179 |
-
|
180 |
-
for(TempIterator iter = temp.begin(); iter != temp.end(); ++iter)
|
181 |
-
{
|
182 |
-
if(wrapped_pred(*iter))
|
183 |
-
{
|
184 |
-
*first = *iter;
|
185 |
-
++first;
|
186 |
-
}
|
187 |
-
}
|
188 |
-
|
189 |
-
ForwardIterator middle = first;
|
190 |
-
|
191 |
-
for(TempIterator iter = temp.begin(); iter != temp.end(); ++iter)
|
192 |
-
{
|
193 |
-
if(!wrapped_pred(*iter))
|
194 |
-
{
|
195 |
-
*first = *iter;
|
196 |
-
++first;
|
197 |
-
}
|
198 |
-
}
|
199 |
-
|
200 |
-
return middle;
|
201 |
-
}
|
202 |
-
|
203 |
-
|
204 |
-
__thrust_exec_check_disable__
|
205 |
-
template<typename DerivedPolicy,
|
206 |
-
typename ForwardIterator,
|
207 |
-
typename InputIterator,
|
208 |
-
typename Predicate>
|
209 |
-
__host__ __device__
|
210 |
-
ForwardIterator stable_partition(sequential::execution_policy<DerivedPolicy> &exec,
|
211 |
-
ForwardIterator first,
|
212 |
-
ForwardIterator last,
|
213 |
-
InputIterator stencil,
|
214 |
-
Predicate pred)
|
215 |
-
{
|
216 |
-
// wrap pred
|
217 |
-
thrust::detail::wrapped_function<
|
218 |
-
Predicate,
|
219 |
-
bool
|
220 |
-
> wrapped_pred(pred);
|
221 |
-
|
222 |
-
typedef typename thrust::iterator_value<ForwardIterator>::type T;
|
223 |
-
|
224 |
-
typedef thrust::detail::temporary_array<T,DerivedPolicy> TempRange;
|
225 |
-
typedef typename TempRange::iterator TempIterator;
|
226 |
-
|
227 |
-
TempRange temp(exec, first, last);
|
228 |
-
|
229 |
-
InputIterator stencil_iter = stencil;
|
230 |
-
for(TempIterator iter = temp.begin(); iter != temp.end(); ++iter, ++stencil_iter)
|
231 |
-
{
|
232 |
-
if(wrapped_pred(*stencil_iter))
|
233 |
-
{
|
234 |
-
*first = *iter;
|
235 |
-
++first;
|
236 |
-
}
|
237 |
-
}
|
238 |
-
|
239 |
-
ForwardIterator middle = first;
|
240 |
-
stencil_iter = stencil;
|
241 |
-
|
242 |
-
for(TempIterator iter = temp.begin(); iter != temp.end(); ++iter, ++stencil_iter)
|
243 |
-
{
|
244 |
-
if(!wrapped_pred(*stencil_iter))
|
245 |
-
{
|
246 |
-
*first = *iter;
|
247 |
-
++first;
|
248 |
-
}
|
249 |
-
}
|
250 |
-
|
251 |
-
return middle;
|
252 |
-
}
|
253 |
-
|
254 |
-
|
255 |
-
__thrust_exec_check_disable__
|
256 |
-
template<typename DerivedPolicy,
|
257 |
-
typename InputIterator,
|
258 |
-
typename OutputIterator1,
|
259 |
-
typename OutputIterator2,
|
260 |
-
typename Predicate>
|
261 |
-
__host__ __device__
|
262 |
-
thrust::pair<OutputIterator1,OutputIterator2>
|
263 |
-
stable_partition_copy(sequential::execution_policy<DerivedPolicy> &,
|
264 |
-
InputIterator first,
|
265 |
-
InputIterator last,
|
266 |
-
OutputIterator1 out_true,
|
267 |
-
OutputIterator2 out_false,
|
268 |
-
Predicate pred)
|
269 |
-
{
|
270 |
-
// wrap pred
|
271 |
-
thrust::detail::wrapped_function<
|
272 |
-
Predicate,
|
273 |
-
bool
|
274 |
-
> wrapped_pred(pred);
|
275 |
-
|
276 |
-
for(; first != last; ++first)
|
277 |
-
{
|
278 |
-
if(wrapped_pred(*first))
|
279 |
-
{
|
280 |
-
*out_true = *first;
|
281 |
-
++out_true;
|
282 |
-
} // end if
|
283 |
-
else
|
284 |
-
{
|
285 |
-
*out_false = *first;
|
286 |
-
++out_false;
|
287 |
-
} // end else
|
288 |
-
}
|
289 |
-
|
290 |
-
return thrust::make_pair(out_true, out_false);
|
291 |
-
}
|
292 |
-
|
293 |
-
|
294 |
-
__thrust_exec_check_disable__
|
295 |
-
template<typename DerivedPolicy,
|
296 |
-
typename InputIterator1,
|
297 |
-
typename InputIterator2,
|
298 |
-
typename OutputIterator1,
|
299 |
-
typename OutputIterator2,
|
300 |
-
typename Predicate>
|
301 |
-
__host__ __device__
|
302 |
-
thrust::pair<OutputIterator1,OutputIterator2>
|
303 |
-
stable_partition_copy(sequential::execution_policy<DerivedPolicy> &,
|
304 |
-
InputIterator1 first,
|
305 |
-
InputIterator1 last,
|
306 |
-
InputIterator2 stencil,
|
307 |
-
OutputIterator1 out_true,
|
308 |
-
OutputIterator2 out_false,
|
309 |
-
Predicate pred)
|
310 |
-
{
|
311 |
-
// wrap pred
|
312 |
-
thrust::detail::wrapped_function<
|
313 |
-
Predicate,
|
314 |
-
bool
|
315 |
-
> wrapped_pred(pred);
|
316 |
-
|
317 |
-
for(; first != last; ++first, ++stencil)
|
318 |
-
{
|
319 |
-
if(wrapped_pred(*stencil))
|
320 |
-
{
|
321 |
-
*out_true = *first;
|
322 |
-
++out_true;
|
323 |
-
} // end if
|
324 |
-
else
|
325 |
-
{
|
326 |
-
*out_false = *first;
|
327 |
-
++out_false;
|
328 |
-
} // end else
|
329 |
-
}
|
330 |
-
|
331 |
-
return thrust::make_pair(out_true, out_false);
|
332 |
-
}
|
333 |
-
|
334 |
-
|
335 |
-
} // end namespace sequential
|
336 |
-
} // end namespace detail
|
337 |
-
} // end namespace system
|
338 |
-
} // end namespace thrust
|
339 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/CONTRIBUTING.md
DELETED
@@ -1,31 +0,0 @@
|
|
1 |
-
# Contributing to segment-anything
|
2 |
-
We want to make contributing to this project as easy and transparent as
|
3 |
-
possible.
|
4 |
-
|
5 |
-
## Pull Requests
|
6 |
-
We actively welcome your pull requests.
|
7 |
-
|
8 |
-
1. Fork the repo and create your branch from `main`.
|
9 |
-
2. If you've added code that should be tested, add tests.
|
10 |
-
3. If you've changed APIs, update the documentation.
|
11 |
-
4. Ensure the test suite passes.
|
12 |
-
5. Make sure your code lints, using the `linter.sh` script in the project's root directory. Linting requires `black==23.*`, `isort==5.12.0`, `flake8`, and `mypy`.
|
13 |
-
6. If you haven't already, complete the Contributor License Agreement ("CLA").
|
14 |
-
|
15 |
-
## Contributor License Agreement ("CLA")
|
16 |
-
In order to accept your pull request, we need you to submit a CLA. You only need
|
17 |
-
to do this once to work on any of Facebook's open source projects.
|
18 |
-
|
19 |
-
Complete your CLA here: <https://code.facebook.com/cla>
|
20 |
-
|
21 |
-
## Issues
|
22 |
-
We use GitHub issues to track public bugs. Please ensure your description is
|
23 |
-
clear and has sufficient instructions to be able to reproduce the issue.
|
24 |
-
|
25 |
-
Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
|
26 |
-
disclosure of security bugs. In those cases, please go through the process
|
27 |
-
outlined on that page and do not file a public issue.
|
28 |
-
|
29 |
-
## License
|
30 |
-
By contributing to segment-anything, you agree that your contributions will be licensed
|
31 |
-
under the LICENSE file in the root directory of this source tree.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/__init__.py
DELETED
File without changes
|
spaces/Cletrason/Cletrason-toad-in-the-mario-movie/trainer_seq2seq.py
DELETED
@@ -1,246 +0,0 @@
|
|
1 |
-
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
from typing import Any, Dict, List, Optional, Tuple, Union
|
16 |
-
|
17 |
-
import torch
|
18 |
-
from torch import nn
|
19 |
-
from torch.utils.data import Dataset
|
20 |
-
|
21 |
-
from .deepspeed import is_deepspeed_zero3_enabled
|
22 |
-
from .trainer import Trainer
|
23 |
-
from .trainer_utils import PredictionOutput
|
24 |
-
from .utils import logging
|
25 |
-
|
26 |
-
|
27 |
-
logger = logging.get_logger(__name__)
|
28 |
-
|
29 |
-
|
30 |
-
class Seq2SeqTrainer(Trainer):
|
31 |
-
def evaluate(
|
32 |
-
self,
|
33 |
-
eval_dataset: Optional[Dataset] = None,
|
34 |
-
ignore_keys: Optional[List[str]] = None,
|
35 |
-
metric_key_prefix: str = "eval",
|
36 |
-
**gen_kwargs,
|
37 |
-
) -> Dict[str, float]:
|
38 |
-
"""
|
39 |
-
Run evaluation and returns metrics.
|
40 |
-
|
41 |
-
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
|
42 |
-
(pass it to the init `compute_metrics` argument).
|
43 |
-
|
44 |
-
You can also subclass and override this method to inject custom behavior.
|
45 |
-
|
46 |
-
Args:
|
47 |
-
eval_dataset (`Dataset`, *optional*):
|
48 |
-
Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns
|
49 |
-
not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
|
50 |
-
method.
|
51 |
-
ignore_keys (`List[str]`, *optional*):
|
52 |
-
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
|
53 |
-
gathering predictions.
|
54 |
-
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
|
55 |
-
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
|
56 |
-
"eval_bleu" if the prefix is `"eval"` (default)
|
57 |
-
max_length (`int`, *optional*):
|
58 |
-
The maximum target length to use when predicting with the generate method.
|
59 |
-
num_beams (`int`, *optional*):
|
60 |
-
Number of beams for beam search that will be used when predicting with the generate method. 1 means no
|
61 |
-
beam search.
|
62 |
-
gen_kwargs:
|
63 |
-
Additional `generate` specific kwargs.
|
64 |
-
|
65 |
-
Returns:
|
66 |
-
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
|
67 |
-
dictionary also contains the epoch number which comes from the training state.
|
68 |
-
"""
|
69 |
-
|
70 |
-
gen_kwargs = gen_kwargs.copy()
|
71 |
-
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
|
72 |
-
gen_kwargs["max_length"] = self.args.generation_max_length
|
73 |
-
gen_kwargs["num_beams"] = (
|
74 |
-
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
|
75 |
-
)
|
76 |
-
self._gen_kwargs = gen_kwargs
|
77 |
-
|
78 |
-
return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
|
79 |
-
|
80 |
-
def predict(
|
81 |
-
self,
|
82 |
-
test_dataset: Dataset,
|
83 |
-
ignore_keys: Optional[List[str]] = None,
|
84 |
-
metric_key_prefix: str = "test",
|
85 |
-
**gen_kwargs,
|
86 |
-
) -> PredictionOutput:
|
87 |
-
"""
|
88 |
-
Run prediction and returns predictions and potential metrics.
|
89 |
-
|
90 |
-
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
|
91 |
-
will also return metrics, like in `evaluate()`.
|
92 |
-
|
93 |
-
Args:
|
94 |
-
test_dataset (`Dataset`):
|
95 |
-
Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the
|
96 |
-
`model.forward()` method are automatically removed. Has to implement the method `__len__`
|
97 |
-
ignore_keys (`List[str]`, *optional*):
|
98 |
-
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
|
99 |
-
gathering predictions.
|
100 |
-
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
|
101 |
-
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
|
102 |
-
"eval_bleu" if the prefix is `"eval"` (default)
|
103 |
-
max_length (`int`, *optional*):
|
104 |
-
The maximum target length to use when predicting with the generate method.
|
105 |
-
num_beams (`int`, *optional*):
|
106 |
-
Number of beams for beam search that will be used when predicting with the generate method. 1 means no
|
107 |
-
beam search.
|
108 |
-
gen_kwargs:
|
109 |
-
Additional `generate` specific kwargs.
|
110 |
-
|
111 |
-
<Tip>
|
112 |
-
|
113 |
-
If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
|
114 |
-
padding in a token classification task) the predictions will be padded (on the right) to allow for
|
115 |
-
concatenation into one array. The padding index is -100.
|
116 |
-
|
117 |
-
</Tip>
|
118 |
-
|
119 |
-
Returns: *NamedTuple* A namedtuple with the following keys:
|
120 |
-
|
121 |
-
- predictions (`np.ndarray`): The predictions on `test_dataset`.
|
122 |
-
- label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
|
123 |
-
- metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
|
124 |
-
labels).
|
125 |
-
"""
|
126 |
-
|
127 |
-
gen_kwargs = gen_kwargs.copy()
|
128 |
-
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
|
129 |
-
gen_kwargs["max_length"] = self.args.generation_max_length
|
130 |
-
gen_kwargs["num_beams"] = (
|
131 |
-
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
|
132 |
-
)
|
133 |
-
self._gen_kwargs = gen_kwargs
|
134 |
-
|
135 |
-
return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
|
136 |
-
|
137 |
-
def prediction_step(
|
138 |
-
self,
|
139 |
-
model: nn.Module,
|
140 |
-
inputs: Dict[str, Union[torch.Tensor, Any]],
|
141 |
-
prediction_loss_only: bool,
|
142 |
-
ignore_keys: Optional[List[str]] = None,
|
143 |
-
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
|
144 |
-
"""
|
145 |
-
Perform an evaluation step on `model` using `inputs`.
|
146 |
-
|
147 |
-
Subclass and override to inject custom behavior.
|
148 |
-
|
149 |
-
Args:
|
150 |
-
model (`nn.Module`):
|
151 |
-
The model to evaluate.
|
152 |
-
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
|
153 |
-
The inputs and targets of the model.
|
154 |
-
|
155 |
-
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
|
156 |
-
argument `labels`. Check your model's documentation for all accepted arguments.
|
157 |
-
prediction_loss_only (`bool`):
|
158 |
-
Whether or not to return the loss only.
|
159 |
-
|
160 |
-
Return:
|
161 |
-
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
|
162 |
-
labels (each being optional).
|
163 |
-
"""
|
164 |
-
|
165 |
-
if not self.args.predict_with_generate or prediction_loss_only:
|
166 |
-
return super().prediction_step(
|
167 |
-
model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
|
168 |
-
)
|
169 |
-
|
170 |
-
has_labels = "labels" in inputs
|
171 |
-
inputs = self._prepare_inputs(inputs)
|
172 |
-
|
173 |
-
# XXX: adapt synced_gpus for fairscale as well
|
174 |
-
gen_kwargs = self._gen_kwargs.copy()
|
175 |
-
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
|
176 |
-
gen_kwargs["max_length"] = self.model.config.max_length
|
177 |
-
gen_kwargs["num_beams"] = (
|
178 |
-
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.model.config.num_beams
|
179 |
-
)
|
180 |
-
default_synced_gpus = True if is_deepspeed_zero3_enabled() else False
|
181 |
-
gen_kwargs["synced_gpus"] = (
|
182 |
-
gen_kwargs["synced_gpus"] if gen_kwargs.get("synced_gpus") is not None else default_synced_gpus
|
183 |
-
)
|
184 |
-
|
185 |
-
# TODO (Joao): the following line is needed to keep a consistent result on SQUAD. Ideally, we should not block
|
186 |
-
# users from preparing a dataset with `decoder_input_ids`.
|
187 |
-
inputs = {k: v for k, v in inputs.items() if k != "decoder_input_ids"}
|
188 |
-
generated_tokens = self.model.generate(**inputs, **gen_kwargs)
|
189 |
-
|
190 |
-
# Temporary hack to ensure the generation config is not initialized for each iteration of the evaluation loop
|
191 |
-
# TODO: remove this hack when the legacy code that initializes generation_config from a model config is
|
192 |
-
# removed in https://github.com/huggingface/transformers/blob/98d88b23f54e5a23e741833f1e973fdf600cc2c5/src/transformers/generation/utils.py#L1183
|
193 |
-
if self.model.generation_config._from_model_config:
|
194 |
-
self.model.generation_config._from_model_config = False
|
195 |
-
# in case the batch is shorter than max length, the output should be padded
|
196 |
-
if gen_kwargs.get("max_length") is not None and generated_tokens.shape[-1] < gen_kwargs["max_length"]:
|
197 |
-
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
|
198 |
-
elif gen_kwargs.get("max_new_tokens") is not None and generated_tokens.shape[-1] < (
|
199 |
-
gen_kwargs["max_new_tokens"] + 1
|
200 |
-
):
|
201 |
-
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_new_tokens"] + 1)
|
202 |
-
|
203 |
-
with torch.no_grad():
|
204 |
-
if has_labels:
|
205 |
-
with self.compute_loss_context_manager():
|
206 |
-
outputs = model(**inputs)
|
207 |
-
if self.label_smoother is not None:
|
208 |
-
loss = self.label_smoother(outputs, inputs["labels"]).mean().detach()
|
209 |
-
else:
|
210 |
-
loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach()
|
211 |
-
else:
|
212 |
-
loss = None
|
213 |
-
|
214 |
-
if self.args.prediction_loss_only:
|
215 |
-
return (loss, None, None)
|
216 |
-
|
217 |
-
if has_labels:
|
218 |
-
labels = inputs["labels"]
|
219 |
-
if gen_kwargs.get("max_length") is not None and labels.shape[-1] < gen_kwargs["max_length"]:
|
220 |
-
labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
|
221 |
-
elif gen_kwargs.get("max_new_tokens") is not None and labels.shape[-1] < (
|
222 |
-
gen_kwargs["max_new_tokens"] + 1
|
223 |
-
):
|
224 |
-
labels = self._pad_tensors_to_max_len(labels, (gen_kwargs["max_new_tokens"] + 1))
|
225 |
-
else:
|
226 |
-
labels = None
|
227 |
-
|
228 |
-
return (loss, generated_tokens, labels)
|
229 |
-
|
230 |
-
def _pad_tensors_to_max_len(self, tensor, max_length):
|
231 |
-
if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"):
|
232 |
-
# If PAD token is not defined at least EOS token has to be defined
|
233 |
-
pad_token_id = (
|
234 |
-
self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
|
235 |
-
)
|
236 |
-
else:
|
237 |
-
if self.model.config.pad_token_id is not None:
|
238 |
-
pad_token_id = self.model.config.pad_token_id
|
239 |
-
else:
|
240 |
-
raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors")
|
241 |
-
|
242 |
-
padded_tensor = pad_token_id * torch.ones(
|
243 |
-
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
|
244 |
-
)
|
245 |
-
padded_tensor[:, : tensor.shape[-1]] = tensor
|
246 |
-
return padded_tensor
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/momentsPen.c
DELETED
The diff for this file is too large to render.
See raw diff
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_r_e_p.py
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
from fontTools import ttLib
|
2 |
-
|
3 |
-
superclass = ttLib.getTableClass("fpgm")
|
4 |
-
|
5 |
-
|
6 |
-
class table__p_r_e_p(superclass):
|
7 |
-
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/archive.py
DELETED
@@ -1,73 +0,0 @@
|
|
1 |
-
from fsspec import AbstractFileSystem
|
2 |
-
from fsspec.utils import tokenize
|
3 |
-
|
4 |
-
|
5 |
-
class AbstractArchiveFileSystem(AbstractFileSystem):
|
6 |
-
"""
|
7 |
-
A generic superclass for implementing Archive-based filesystems.
|
8 |
-
|
9 |
-
Currently, it is shared amongst
|
10 |
-
:class:`~fsspec.implementations.zip.ZipFileSystem`,
|
11 |
-
:class:`~fsspec.implementations.libarchive.LibArchiveFileSystem` and
|
12 |
-
:class:`~fsspec.implementations.tar.TarFileSystem`.
|
13 |
-
"""
|
14 |
-
|
15 |
-
def __str__(self):
|
16 |
-
return "<Archive-like object %s at %s>" % (type(self).__name__, id(self))
|
17 |
-
|
18 |
-
__repr__ = __str__
|
19 |
-
|
20 |
-
def ukey(self, path):
|
21 |
-
return tokenize(path, self.fo, self.protocol)
|
22 |
-
|
23 |
-
def _all_dirnames(self, paths):
|
24 |
-
"""Returns *all* directory names for each path in paths, including intermediate
|
25 |
-
ones.
|
26 |
-
|
27 |
-
Parameters
|
28 |
-
----------
|
29 |
-
paths: Iterable of path strings
|
30 |
-
"""
|
31 |
-
if len(paths) == 0:
|
32 |
-
return set()
|
33 |
-
|
34 |
-
dirnames = {self._parent(path) for path in paths} - {self.root_marker}
|
35 |
-
return dirnames | self._all_dirnames(dirnames)
|
36 |
-
|
37 |
-
def info(self, path, **kwargs):
|
38 |
-
self._get_dirs()
|
39 |
-
path = self._strip_protocol(path)
|
40 |
-
if path in {"", "/"} and self.dir_cache:
|
41 |
-
return {"name": "/", "type": "directory", "size": 0}
|
42 |
-
if path in self.dir_cache:
|
43 |
-
return self.dir_cache[path]
|
44 |
-
elif path + "/" in self.dir_cache:
|
45 |
-
return self.dir_cache[path + "/"]
|
46 |
-
else:
|
47 |
-
raise FileNotFoundError(path)
|
48 |
-
|
49 |
-
def ls(self, path, detail=True, **kwargs):
|
50 |
-
self._get_dirs()
|
51 |
-
paths = {}
|
52 |
-
for p, f in self.dir_cache.items():
|
53 |
-
p = p.rstrip("/")
|
54 |
-
if "/" in p:
|
55 |
-
root = p.rsplit("/", 1)[0]
|
56 |
-
else:
|
57 |
-
root = ""
|
58 |
-
if root == path.rstrip("/"):
|
59 |
-
paths[p] = f
|
60 |
-
elif all(
|
61 |
-
(a == b)
|
62 |
-
for a, b in zip(path.split("/"), [""] + p.strip("/").split("/"))
|
63 |
-
):
|
64 |
-
# root directory entry
|
65 |
-
ppath = p.rstrip("/").split("/", 1)[0]
|
66 |
-
if ppath not in paths:
|
67 |
-
out = {"name": ppath + "/", "size": 0, "type": "directory"}
|
68 |
-
paths[ppath] = out
|
69 |
-
out = sorted(paths.values(), key=lambda _: _["name"])
|
70 |
-
if detail:
|
71 |
-
return out
|
72 |
-
else:
|
73 |
-
return [f["name"] for f in out]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3ca142e0.css
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
.spacer.svelte-1kspdo{display:inline-block;width:0;height:0}.json-node.svelte-1kspdo{display:inline;color:var(--body-text-color);line-height:var(--line-sm);font-family:var(--font-mono)}.expand-array.svelte-1kspdo{border:1px solid var(--border-color-primary);border-radius:var(--radius-sm);background:var(--background-fill-secondary);padding:0 var(--size-1);color:var(--body-text-color)}.expand-array.svelte-1kspdo:hover{background:var(--background-fill-primary)}.children.svelte-1kspdo{padding-left:var(--size-4)}.json-item.svelte-1kspdo{display:inline}.null.svelte-1kspdo{color:var(--body-text-color-subdued)}.string.svelte-1kspdo{color:var(--color-green-500)}.number.svelte-1kspdo{color:var(--color-blue-500)}.bool.svelte-1kspdo{color:var(--color-red-500)}.json-holder.svelte-1trjy9a{padding:var(--size-2)}button.svelte-1trjy9a{display:flex;position:absolute;top:var(--block-label-margin);right:var(--block-label-margin);align-items:center;box-shadow:var(--shadow-drop);border:1px solid var(--border-color-primary);border-top:none;border-right:none;border-radius:var(--block-label-right-radius);background:var(--block-label-background-fill);padding:5px;width:22px;height:22px;overflow:hidden;color:var(--block-label-text-color);font:var(--font);font-size:var(--button-small-text-size)}
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-ff630227.js
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import{S as I,e as J,s as K,J as U,K as u,p as j,M as y,n as P,A as E,N as R,O as V,P as D,L as F,Z as Le,ar as je,R as G,G as T,m as Z,V as Y,B as be,C as Ee,av as Q,aj as Ae,X as Ce,k as O,o as X,z as B,v as S,x as q,E as Me,ae as ze,q as Te,r as Be,u as pe,y as ke}from"./index-3370be2a.js";import{U as Se}from"./Upload-f29b2460.js";import{M as Ue}from"./ModifyUpload-d8fc50ab.js";import{B as Ne}from"./Button-89624748.js";import{B as Fe}from"./BlockLabel-56db415e.js";import{E as Oe}from"./Empty-585389a4.js";import{g as Xe}from"./color-baaf9df5.js";import{a as qe}from"./csv-b0b7514a.js";import{Z as x,_ as $,l as ee}from"./linear-58a44b5e.js";import{U as He}from"./UploadText-28892309.js";import"./Blocks-f0129fcd.js";import"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import"./IconButton-abe5ede9.js";import"./dsv-576afacd.js";function Pe(l){let e,n,t;return{c(){e=U("svg"),n=U("path"),t=U("path"),u(n,"d","M28.828 3.172a4.094 4.094 0 0 0-5.656 0L4.05 22.292A6.954 6.954 0 0 0 2 27.242V30h2.756a6.952 6.952 0 0 0 4.95-2.05L28.828 8.829a3.999 3.999 0 0 0 0-5.657zM10.91 18.26l2.829 2.829l-2.122 2.121l-2.828-2.828zm-2.619 8.276A4.966 4.966 0 0 1 4.756 28H4v-.759a4.967 4.967 0 0 1 1.464-3.535l1.91-1.91l2.829 2.828zM27.415 7.414l-12.261 12.26l-2.829-2.828l12.262-12.26a2.047 2.047 0 0 1 2.828 0a2 2 0 0 1 0 2.828z"),u(n,"fill","currentColor"),u(t,"d","M6.5 15a3.5 3.5 0 0 1-2.475-5.974l3.5-3.5a1.502 1.502 0 0 0 0-2.121a1.537 1.537 0 0 0-2.121 0L3.415 5.394L2 3.98l1.99-1.988a3.585 3.585 0 0 1 4.95 0a3.504 3.504 0 0 1 0 4.949L5.439 10.44a1.502 1.502 0 0 0 0 2.121a1.537 1.537 0 0 0 2.122 0l4.024-4.024L13 9.95l-4.025 4.024A3.475 3.475 0 0 1 6.5 15z"),u(t,"fill","currentColor"),u(e,"width","1em"),u(e,"height","1em"),u(e,"viewBox","0 0 32 32")},m(a,s){j(a,e,s),y(e,n),y(e,t)},p:P,i:P,o:P,d(a){a&&E(e)}}}let ye=class extends I{constructor(e){super(),J(this,e,null,Pe,K,{})}};function le(l){let e;return 
Array.isArray(l)?e=l.reduce((n,{values:t})=>[...n,...t.map(({y:a})=>a)],[]):e=l.values,[Math.min(...e),Math.max(...e)]}function te(l,e,n){const t=Object.entries(l[0]).reduce((a,s,o)=>(!e&&o===0||e&&s[0]===e?a.x.name=s[0]:(!n||n&&n.includes(s[0]))&&a.y.push({name:s[0],values:[]}),a),{x:{name:"",values:[]},y:[]});for(let a=0;a<l.length;a++){const s=Object.entries(l[a]);for(let o=0;o<s.length;o++){let[_,i]=s[o];_===t.x.name?t.x.values.push(parseFloat(i)):t.y[o-1].values.push({y:parseFloat(s[o][1]),x:parseFloat(s[0][1])})}}return t}function Re(l){let e,n,t,a,s;return{c(){e=R("div"),n=R("span"),t=V(),a=D(l[0]),F(n,"background",l[3]),u(n,"class","svelte-1gww5xe"),F(e,"top",l[2]-l[5]/2+"px"),F(e,"left",l[1]-l[4]-7+"px"),u(e,"class","svelte-1gww5xe"),Le(()=>l[6].call(e))},m(o,_){j(o,e,_),y(e,n),y(e,t),y(e,a),s=je(e,l[6].bind(e))},p(o,[_]){_&8&&F(n,"background",o[3]),_&1&&G(a,o[0]),_&36&&F(e,"top",o[2]-o[5]/2+"px"),_&18&&F(e,"left",o[1]-o[4]-7+"px")},i:P,o:P,d(o){o&&E(e),s()}}}function Ve(l,e,n){let{text:t}=e,{x:a}=e,{y:s}=e,{color:o}=e,_,i;function v(){_=this.offsetWidth,i=this.offsetHeight,n(4,_),n(5,i)}return l.$$set=g=>{"text"in g&&n(0,t=g.text),"x"in g&&n(1,a=g.x),"y"in g&&n(2,s=g.y),"color"in g&&n(3,o=g.color)},[t,a,s,o,_,i,v]}class Ye extends I{constructor(e){super(),J(this,e,Ve,Re,K,{text:0,x:1,y:2,color:3})}}function Ze(l,{color:e,text:n}){let t;function a(i){return t=new Ye({props:{text:n,x:i.pageX,y:i.pageY,color:e},target:document.body}),i}function s(i){t.$set({x:i.pageX,y:i.pageY})}function o(){t.$destroy()}const _=l;return _.addEventListener("mouseover",a),_.addEventListener("mouseleave",o),_.addEventListener("mousemove",s),{destroy(){_.removeEventListener("mouseover",a),_.removeEventListener("mouseleave",o),_.removeEventListener("mousemove",s)}}}function ne(l,e,n){const t=l.slice();t[16]=e[n].name,t[17]=e[n].values;const a=t[8][t[16]];return t[18]=a,t}function ae(l,e,n){const t=l.slice();return t[0]=e[n].x,t[1]=e[n].y,t}function oe(l,e,n){const 
t=l.slice();t[16]=e[n].name,t[17]=e[n].values;const a=t[8][t[16]];return t[18]=a,t}function se(l,e,n){const t=l.slice();return t[0]=e[n].x,t[1]=e[n].y,t}function re(l,e,n){const t=l.slice();return t[27]=e[n],t}function ie(l,e,n){const t=l.slice();return t[27]=e[n],t}function fe(l,e,n){const t=l.slice();return t[16]=e[n].name,t}function _e(l){let e,n,t,a=l[16]+"",s,o;return{c(){e=R("div"),n=R("span"),t=V(),s=D(a),o=V(),u(n,"class","legend-box svelte-1mjxput"),F(n,"background-color",l[8][l[16]]),u(e,"class","legend-item svelte-1mjxput")},m(_,i){j(_,e,i),y(e,n),y(e,t),y(e,s),y(e,o)},p(_,i){i[0]&260&&F(n,"background-color",_[8][_[16]]),i[0]&4&&a!==(a=_[16]+"")&&G(s,a)},d(_){_&&E(e)}}}function ue(l){let e,n,t,a,s,o,_=l[27]+"",i,v,g;return{c(){e=U("line"),o=U("text"),i=D(_),u(e,"stroke-width","0.5"),u(e,"x1",n=l[5](l[27])),u(e,"x2",t=l[5](l[27])),u(e,"y1",a=l[4](l[9][0]<l[6][0]?l[9][0]:l[6][0])+10),u(e,"y2",s=l[4](l[6][1]>l[9][l[9].length-1]?l[6][1]:l[9][l[9].length-1])),u(e,"stroke","#aaa"),u(o,"class","label-text svelte-1mjxput"),u(o,"text-anchor","middle"),u(o,"x",v=l[5](l[27])),u(o,"y",g=l[4](l[9][0])+30)},m(f,h){j(f,e,h),j(f,o,h),y(o,i)},p(f,h){h[0]&1056&&n!==(n=f[5](f[27]))&&u(e,"x1",n),h[0]&1056&&t!==(t=f[5](f[27]))&&u(e,"x2",t),h[0]&592&&a!==(a=f[4](f[9][0]<f[6][0]?f[9][0]:f[6][0])+10)&&u(e,"y1",a),h[0]&592&&s!==(s=f[4](f[6][1]>f[9][f[9].length-1]?f[6][1]:f[9][f[9].length-1]))&&u(e,"y2",s),h[0]&1024&&_!==(_=f[27]+"")&&G(i,_),h[0]&1056&&v!==(v=f[5](f[27]))&&u(o,"x",v),h[0]&528&&g!==(g=f[4](f[9][0])+30)&&u(o,"y",g)},d(f){f&&(E(e),E(o))}}}function ce(l){let e,n,t,a,s,o,_=l[27]+"",i,v,g;return{c(){e=U("line"),o=U("text"),i=D(_),u(e,"stroke-width","0.5"),u(e,"y1",n=l[4](l[27])),u(e,"y2",t=l[4](l[27])),u(e,"x1",a=l[5](l[10][0]<l[7][0]?l[10][0]:l[7][0])-10),u(e,"x2",s=l[5](l[7][1]>l[10][l[10].length-1]?l[7][1]:l[10][l[10].length-1])),u(e,"stroke","#aaa"),u(o,"class","label-text 
svelte-1mjxput"),u(o,"text-anchor","end"),u(o,"y",v=l[4](l[27])+4),u(o,"x",g=l[5](l[10][0])-20)},m(f,h){j(f,e,h),j(f,o,h),y(o,i)},p(f,h){h[0]&528&&n!==(n=f[4](f[27]))&&u(e,"y1",n),h[0]&528&&t!==(t=f[4](f[27]))&&u(e,"y2",t),h[0]&1184&&a!==(a=f[5](f[10][0]<f[7][0]?f[10][0]:f[7][0])-10)&&u(e,"x1",a),h[0]&1184&&s!==(s=f[5](f[7][1]>f[10][f[10].length-1]?f[7][1]:f[10][f[10].length-1]))&&u(e,"x2",s),h[0]&512&&_!==(_=f[27]+"")&&G(i,_),h[0]&528&&v!==(v=f[4](f[27])+4)&&u(o,"y",v),h[0]&1056&&g!==(g=f[5](f[10][0])-20)&&u(o,"x",g)},d(f){f&&(E(e),E(o))}}}function me(l){let e,n,t,a,s,o,_=l[6][1]+"",i,v,g;return{c(){e=U("line"),o=U("text"),i=D(_),u(e,"stroke-width","0.5"),u(e,"y1",n=l[4](l[6][1])),u(e,"y2",t=l[4](l[6][1])),u(e,"x1",a=l[5](l[10][0])),u(e,"x2",s=l[5](l[7][1])),u(e,"stroke","#aaa"),u(o,"class","label-text svelte-1mjxput"),u(o,"text-anchor","end"),u(o,"y",v=l[4](l[6][1])+4),u(o,"x",g=l[5](l[10][0])-20)},m(f,h){j(f,e,h),j(f,o,h),y(o,i)},p(f,h){h[0]&80&&n!==(n=f[4](f[6][1]))&&u(e,"y1",n),h[0]&80&&t!==(t=f[4](f[6][1]))&&u(e,"y2",t),h[0]&1056&&a!==(a=f[5](f[10][0]))&&u(e,"x1",a),h[0]&160&&s!==(s=f[5](f[7][1]))&&u(e,"x2",s),h[0]&64&&_!==(_=f[6][1]+"")&&G(i,_),h[0]&80&&v!==(v=f[4](f[6][1])+4)&&u(o,"y",v),h[0]&1056&&g!==(g=f[5](f[10][0])-20)&&u(o,"x",g)},d(f){f&&(E(e),E(o))}}}function he(l){let e,n,t,a;return{c(){e=U("circle"),u(e,"r","3.5"),u(e,"cx",n=l[5](l[0])),u(e,"cy",t=l[4](l[1])),u(e,"stroke-width","1.5"),u(e,"stroke",a=l[18]),u(e,"fill","none")},m(s,o){j(s,e,o)},p(s,o){o[0]&36&&n!==(n=s[5](s[0]))&&u(e,"cx",n),o[0]&20&&t!==(t=s[4](s[1]))&&u(e,"cy",t),o[0]&260&&a!==(a=s[18])&&u(e,"stroke",a)},d(s){s&&E(e)}}}function ge(l){let e,n,t,a=T(l[17]),s=[];for(let o=0;o<a.length;o+=1)s[o]=he(se(l,a,o));return{c(){for(let o=0;o<s.length;o+=1)s[o].c();e=U("path"),u(e,"d",n=$().curve(ee)(l[17].map(l[13]))),u(e,"fill","none"),u(e,"stroke",t=l[18]),u(e,"stroke-width","3")},m(o,_){for(let i=0;i<s.length;i+=1)s[i]&&s[i].m(o,_);j(o,e,_)},p(o,_){if(_[0]&308){a=T(o[17]);let 
i;for(i=0;i<a.length;i+=1){const v=se(o,a,i);s[i]?s[i].p(v,_):(s[i]=he(v),s[i].c(),s[i].m(e.parentNode,e))}for(;i<s.length;i+=1)s[i].d(1);s.length=a.length}_[0]&52&&n!==(n=$().curve(ee)(o[17].map(o[13])))&&u(e,"d",n),_[0]&260&&t!==(t=o[18])&&u(e,"stroke",t)},d(o){o&&E(e),Y(s,o)}}}function de(l){let e,n,t,a,s,o;return{c(){e=U("circle"),u(e,"r","7"),u(e,"cx",n=l[5](l[0])),u(e,"cy",t=l[4](l[1])),u(e,"stroke","black"),u(e,"fill","black"),F(e,"cursor","pointer"),F(e,"opacity","0")},m(_,i){j(_,e,i),s||(o=Ae(a=Ze.call(null,e,{color:l[18],text:`(${l[0]}, ${l[1]})`})),s=!0)},p(_,i){l=_,i[0]&36&&n!==(n=l[5](l[0]))&&u(e,"cx",n),i[0]&20&&t!==(t=l[4](l[1]))&&u(e,"cy",t),a&&Ce(a.update)&&i[0]&260&&a.update.call(null,{color:l[18],text:`(${l[0]}, ${l[1]})`})},d(_){_&&E(e),s=!1,o()}}}function ve(l){let e,n=T(l[17]),t=[];for(let a=0;a<n.length;a+=1)t[a]=de(ae(l,n,a));return{c(){for(let a=0;a<t.length;a+=1)t[a].c();e=Z()},m(a,s){for(let o=0;o<t.length;o+=1)t[o]&&t[o].m(a,s);j(a,e,s)},p(a,s){if(s[0]&308){n=T(a[17]);let o;for(o=0;o<n.length;o+=1){const _=ae(a,n,o);t[o]?t[o].p(_,s):(t[o]=de(_),t[o].c(),t[o].m(e.parentNode,e))}for(;o<t.length;o+=1)t[o].d(1);t.length=n.length}},d(a){a&&E(e),Y(t,a)}}}function De(l){let e,n,t,a,s,o,_,i,v,g,f=l[3].name+"",h,A=T(l[2]),m=[];for(let c=0;c<A.length;c+=1)m[c]=_e(fe(l,A,c));let b=T(l[10]),p=[];for(let c=0;c<b.length;c+=1)p[c]=ue(ie(l,b,c));let N=T(l[9]),k=[];for(let c=0;c<N.length;c+=1)k[c]=ce(re(l,N,c));let d=l[6][1]>l[9][l[9].length-1]&&me(l),C=T(l[2]),L=[];for(let c=0;c<C.length;c+=1)L[c]=ge(oe(l,C,c));let H=T(l[2]),M=[];for(let c=0;c<H.length;c+=1)M[c]=ve(ne(l,H,c));return{c(){e=R("div"),n=R("div");for(let c=0;c<m.length;c+=1)m[c].c();t=V(),a=U("svg"),s=U("g");for(let c=0;c<p.length;c+=1)p[c].c();o=Z();for(let c=0;c<k.length;c+=1)k[c].c();_=Z(),d&&d.c();for(let c=0;c<L.length;c+=1)L[c].c();i=Z();for(let c=0;c<M.length;c+=1)M[c].c();v=V(),g=R("div"),h=D(f),u(n,"class","legend svelte-1mjxput"),u(a,"class","w-full 
svelte-1mjxput"),u(a,"viewBox","-70 -20 700 420"),u(g,"class","main-label svelte-1mjxput"),u(e,"class","wrap svelte-1mjxput")},m(c,z){j(c,e,z),y(e,n);for(let r=0;r<m.length;r+=1)m[r]&&m[r].m(n,null);y(e,t),y(e,a),y(a,s);for(let r=0;r<p.length;r+=1)p[r]&&p[r].m(s,null);y(s,o);for(let r=0;r<k.length;r+=1)k[r]&&k[r].m(s,null);y(s,_),d&&d.m(s,null);for(let r=0;r<L.length;r+=1)L[r]&&L[r].m(a,null);y(a,i);for(let r=0;r<M.length;r+=1)M[r]&&M[r].m(a,null);y(e,v),y(e,g),y(g,h)},p(c,z){if(z[0]&260){A=T(c[2]);let r;for(r=0;r<A.length;r+=1){const w=fe(c,A,r);m[r]?m[r].p(w,z):(m[r]=_e(w),m[r].c(),m[r].m(n,null))}for(;r<m.length;r+=1)m[r].d(1);m.length=A.length}if(z[0]&1648){b=T(c[10]);let r;for(r=0;r<b.length;r+=1){const w=ie(c,b,r);p[r]?p[r].p(w,z):(p[r]=ue(w),p[r].c(),p[r].m(s,o))}for(;r<p.length;r+=1)p[r].d(1);p.length=b.length}if(z[0]&1712){N=T(c[9]);let r;for(r=0;r<N.length;r+=1){const w=re(c,N,r);k[r]?k[r].p(w,z):(k[r]=ce(w),k[r].c(),k[r].m(s,_))}for(;r<k.length;r+=1)k[r].d(1);k.length=N.length}if(c[6][1]>c[9][c[9].length-1]?d?d.p(c,z):(d=me(c),d.c(),d.m(s,null)):d&&(d.d(1),d=null),z[0]&308){C=T(c[2]);let r;for(r=0;r<C.length;r+=1){const w=oe(c,C,r);L[r]?L[r].p(w,z):(L[r]=ge(w),L[r].c(),L[r].m(a,i))}for(;r<L.length;r+=1)L[r].d(1);L.length=C.length}if(z[0]&308){H=T(c[2]);let r;for(r=0;r<H.length;r+=1){const w=ne(c,H,r);M[r]?M[r].p(w,z):(M[r]=ve(w),M[r].c(),M[r].m(a,null))}for(;r<M.length;r+=1)M[r].d(1);M.length=H.length}z[0]&8&&f!==(f=c[3].name+"")&&G(h,f)},i:P,o:P,d(c){c&&E(e),Y(m,c),Y(p,c),Y(k,c),d&&d.d(),Y(L,c),Y(M,c)}}}function Ge(l,e,n){let t,a,s,o,_,i,v,g,{value:f}=e,{x:h=void 0}=e,{y:A=void 0}=e,{colors:m=[]}=e;const b=be();let p;function N(d){let C=m[d%m.length];return C&&C in Q?Q[C]?.primary:C||Q[Xe(d)].primary}Ee(()=>{b("process",{x:t,y:a})});const k=({x:d,y:C})=>[_(d),i(C)];return l.$$set=d=>{"value"in d&&n(11,f=d.value),"x"in d&&n(0,h=d.x),"y"in d&&n(1,A=d.y),"colors"in d&&n(12,m=d.colors)},l.$$.update=()=>{l.$$.dirty[0]&2051&&n(3,{x:t,y:a}=te(typeof 
f=="string"?qe(f):f,h,A),t,(n(2,a),n(11,f),n(0,h),n(1,A))),l.$$.dirty[0]&8&&n(7,s=le(t)),l.$$.dirty[0]&4&&n(6,o=le(a)),l.$$.dirty[0]&128&&n(5,_=x(s,[0,600]).nice()),l.$$.dirty[0]&64&&n(4,i=x(o,[350,0]).nice()),l.$$.dirty[0]&32&&n(10,v=_.ticks(8)),l.$$.dirty[0]&16&&n(9,g=i.ticks(8)),l.$$.dirty[0]&4&&n(8,p=a.reduce((d,C,L)=>({...d,[C.name]:N(L)}),{}))},[h,A,a,t,i,_,o,s,p,g,v,f,m,k]}class we extends I{constructor(e){super(),J(this,e,Ge,De,K,{value:11,x:0,y:1,colors:12},null,[-1,-1])}}function Ie(l){let e,n;return e=new Se({props:{filetype:"text/csv",include_file_metadata:!1,$$slots:{default:[We]},$$scope:{ctx:l}}}),e.$on("load",l[19]),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,a){const s={};a&8388608&&(s.$$scope={dirty:a,ctx:t}),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function Je(l){let e,n,t,a,s;return n=new Ue({}),n.$on("clear",l[17]),a=new we({props:{value:l[14],y:l[4],x:l[5],colors:l[9]}}),a.$on("process",l[18]),{c(){e=R("div"),O(n.$$.fragment),t=V(),O(a.$$.fragment),u(e,"class","chart svelte-etmurc")},m(o,_){j(o,e,_),X(n,e,null),y(e,t),X(a,e,null),s=!0},p(o,_){const i={};_&16384&&(i.value=o[14]),_&16&&(i.y=o[4]),_&32&&(i.x=o[5]),_&512&&(i.colors=o[9]),a.$set(i)},i(o){s||(B(n.$$.fragment,o),B(a.$$.fragment,o),s=!0)},o(o){S(n.$$.fragment,o),S(a.$$.fragment,o),s=!1},d(o){o&&E(e),q(n),q(a)}}}function Ke(l){let e,n,t,a;const s=[xe,Qe],o=[];function _(i,v){return i[15]?0:1}return e=_(l),n=o[e]=s[e](l),{c(){n.c(),t=Z()},m(i,v){o[e].m(i,v),j(i,t,v),a=!0},p(i,v){let g=e;e=_(i),e===g?o[e].p(i,v):(pe(),S(o[g],1,1,()=>{o[g]=null}),ke(),n=o[e],n?n.p(i,v):(n=o[e]=s[e](i),n.c()),B(n,1),n.m(t.parentNode,t))},i(i){a||(B(n),a=!0)},o(i){S(n),a=!1},d(i){i&&E(t),o[e].d(i)}}}function We(l){let e,n;return e=new He({props:{type:"csv"}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p:P,i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function Qe(l){let e,n;return e=new 
Oe({props:{unpadded_box:!0,size:"large",$$slots:{default:[$e]},$$scope:{ctx:l}}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,a){const s={};a&8388608&&(s.$$scope={dirty:a,ctx:t}),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function xe(l){let e,n;return e=new we({props:{value:l[15],colors:l[9]}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,a){const s={};a&32768&&(s.value=t[15]),a&512&&(s.colors=t[9]),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function $e(l){let e,n;return e=new ye({}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function el(l){let e,n,t,a,s,o,_,i;e=new Fe({props:{show_label:l[8],Icon:ye,label:l[7]||"TimeSeries"}});const v=[l[13]];let g={};for(let m=0;m<v.length;m+=1)g=Me(g,v[m]);t=new ze({props:g});const f=[Ke,Je,Ie],h=[];function A(m,b){return m[6]==="static"?0:m[14]?1:m[0]===void 0||m[0]===null?2:-1}return~(s=A(l))&&(o=h[s]=f[s](l)),{c(){O(e.$$.fragment),n=V(),O(t.$$.fragment),a=V(),o&&o.c(),_=Z()},m(m,b){X(e,m,b),j(m,n,b),X(t,m,b),j(m,a,b),~s&&h[s].m(m,b),j(m,_,b),i=!0},p(m,b){const p={};b&256&&(p.show_label=m[8]),b&128&&(p.label=m[7]||"TimeSeries"),e.$set(p);const N=b&8192?Te(v,[Be(m[13])]):{};t.$set(N);let k=s;s=A(m),s===k?~s&&h[s].p(m,b):(o&&(pe(),S(h[k],1,1,()=>{h[k]=null}),ke()),~s?(o=h[s],o?o.p(m,b):(o=h[s]=f[s](m),o.c()),B(o,1),o.m(_.parentNode,_)):o=null)},i(m){i||(B(e.$$.fragment,m),B(t.$$.fragment,m),B(o),i=!0)},o(m){S(e.$$.fragment,m),S(t.$$.fragment,m),S(o),i=!1},d(m){m&&(E(n),E(a),E(_)),q(e,m),q(t,m),~s&&h[s].d(m)}}}function ll(l){let e,n;return e=new Ne({props:{visible:l[3],variant:l[6]==="dynamic"&&!l[14]?"dashed":"solid",padding:!1,elem_id:l[1],elem_classes:l[2],container:l[10],scale:l[11],min_width:l[12],$$slots:{default:[el]},$$scope:{ctx:l}}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,[a]){const 
s={};a&8&&(s.visible=t[3]),a&16448&&(s.variant=t[6]==="dynamic"&&!t[14]?"dashed":"solid"),a&2&&(s.elem_id=t[1]),a&4&&(s.elem_classes=t[2]),a&1024&&(s.container=t[10]),a&2048&&(s.scale=t[11]),a&4096&&(s.min_width=t[12]),a&8446961&&(s.$$scope={dirty:a,ctx:t}),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function tl(l){return l.data.map(e=>e.reduce((n,t,a)=>({...n,[l.headers[a]]:t}),{}))}function nl(l){const e=atob(l.split(",")[1]),n=l.split(",")[0].split(":")[1].split(";")[0],t=new ArrayBuffer(e.length),a=new Uint8Array(t);for(let s=0;s<e.length;s++)a[s]=e.charCodeAt(s);return new Blob([t],{type:n})}function al(l,e){const n=[],t=[];n.push(l.name),e.forEach(({name:a})=>n.push(a));for(let a=0;a<l.values.length;a++){let s=[];s.push(l.values[a]),e.forEach(({values:o})=>s.push(o[a].y)),t.push(s)}return{headers:n,data:t}}function ol(l,e,n){let t;const a=be();let{elem_id:s=""}=e,{elem_classes:o=[]}=e,{visible:_=!0}=e,{value:i}=e,{y:v}=e,{x:g}=e,{mode:f}=e,{label:h}=e,{show_label:A}=e,{colors:m}=e,{container:b=!0}=e,{scale:p=null}=e,{min_width:N=void 0}=e,{loading_status:k}=e,d;function C(r){const w=new FileReader;w.addEventListener("loadend",W=>{n(14,d=W.srcElement.result)}),w.readAsText(r)}function L(r){r.headers&&n(14,d=r.headers.join(",")),r.data.forEach(W=>{n(14,d=d+`
|
2 |
-
`),n(14,d=d+W.join(","))})}function H(r){return n(0,i={data:r}),r}function M({detail:r}){n(0,i=null),a("change"),a("clear")}const c=({detail:{x:r,y:w}})=>n(0,i=al(r,w)),z=({detail:r})=>H(r);return l.$$set=r=>{"elem_id"in r&&n(1,s=r.elem_id),"elem_classes"in r&&n(2,o=r.elem_classes),"visible"in r&&n(3,_=r.visible),"value"in r&&n(0,i=r.value),"y"in r&&n(4,v=r.y),"x"in r&&n(5,g=r.x),"mode"in r&&n(6,f=r.mode),"label"in r&&n(7,h=r.label),"show_label"in r&&n(8,A=r.show_label),"colors"in r&&n(9,m=r.colors),"container"in r&&n(10,b=r.container),"scale"in r&&n(11,p=r.scale),"min_width"in r&&n(12,N=r.min_width),"loading_status"in r&&n(13,k=r.loading_status)},l.$$.update=()=>{l.$$.dirty&1&&(i&&i.data&&typeof i.data=="string"?i?C(nl(i.data)):n(14,d=null):i&&i.data&&typeof i.data!="string"&&(i||n(14,d=null),L(i))),l.$$.dirty&16385&&n(14,d=i==null?null:d),l.$$.dirty&65&&n(15,t=f==="static"&&i&&tl(i)),l.$$.dirty&1&&a("change")},[i,s,o,_,v,g,f,h,A,m,b,p,N,k,d,t,H,M,c,z]}class sl extends I{constructor(e){super(),J(this,e,ol,ll,K,{elem_id:1,elem_classes:2,visible:3,value:0,y:4,x:5,mode:6,label:7,show_label:8,colors:9,container:10,scale:11,min_width:12,loading_status:13})}}const wl=sl,Ll=["static","dynamic"],jl=l=>({type:{payload:"{data: Array<Array<number>> | string; headers?: Array<string>;}"},description:{payload:"dataset of series"}});export{wl as Component,jl as document,Ll as modes};
|
3 |
-
//# sourceMappingURL=index-ff630227.js.map
|
|
|
|
|
|
|
|
spaces/DataScienceGuild/ARIMA_test/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: ARIMA Test
|
3 |
-
emoji: 🌖
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: pink
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.25.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/modules/swg_transformer.py
DELETED
@@ -1,49 +0,0 @@
|
|
1 |
-
from models.modules.transformer_modules import *
|
2 |
-
|
3 |
-
|
4 |
-
class SWG_Transformer(nn.Module):
|
5 |
-
def __init__(self, dim, depth, heads, win_size, dim_head, mlp_dim,
|
6 |
-
dropout=0., patch_num=None, ape=None, rpe=None, rpe_pos=1):
|
7 |
-
super().__init__()
|
8 |
-
self.absolute_pos_embed = None if patch_num is None or ape is None else AbsolutePosition(dim, dropout,
|
9 |
-
patch_num, ape)
|
10 |
-
self.pos_dropout = nn.Dropout(dropout)
|
11 |
-
self.layers = nn.ModuleList([])
|
12 |
-
for i in range(depth):
|
13 |
-
if i % 2 == 0:
|
14 |
-
attention = WinAttention(dim, win_size=win_size, shift=0 if (i % 3 == 0) else win_size // 2,
|
15 |
-
heads=heads, dim_head=dim_head, dropout=dropout, rpe=rpe, rpe_pos=rpe_pos)
|
16 |
-
else:
|
17 |
-
attention = Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout,
|
18 |
-
patch_num=patch_num, rpe=rpe, rpe_pos=rpe_pos)
|
19 |
-
|
20 |
-
self.layers.append(nn.ModuleList([
|
21 |
-
PreNorm(dim, attention),
|
22 |
-
PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout)),
|
23 |
-
]))
|
24 |
-
|
25 |
-
def forward(self, x):
|
26 |
-
if self.absolute_pos_embed is not None:
|
27 |
-
x = self.absolute_pos_embed(x)
|
28 |
-
x = self.pos_dropout(x)
|
29 |
-
for attn, ff in self.layers:
|
30 |
-
x = attn(x) + x
|
31 |
-
x = ff(x) + x
|
32 |
-
return x
|
33 |
-
|
34 |
-
|
35 |
-
if __name__ == '__main__':
|
36 |
-
token_dim = 1024
|
37 |
-
toke_len = 256
|
38 |
-
|
39 |
-
transformer = SWG_Transformer(dim=token_dim,
|
40 |
-
depth=6,
|
41 |
-
heads=16,
|
42 |
-
win_size=8,
|
43 |
-
dim_head=64,
|
44 |
-
mlp_dim=2048,
|
45 |
-
dropout=0.1)
|
46 |
-
|
47 |
-
input = torch.randn(1, toke_len, token_dim)
|
48 |
-
output = transformer(input)
|
49 |
-
print(output.shape)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Dorado607/ChuanhuChatGPT/run_Linux.sh
DELETED
@@ -1,31 +0,0 @@
|
|
1 |
-
#!/bin/bash
|
2 |
-
|
3 |
-
# 获取脚本所在目录
|
4 |
-
script_dir=$(dirname "$(readlink -f "$0")")
|
5 |
-
|
6 |
-
# 将工作目录更改为脚本所在目录
|
7 |
-
cd "$script_dir" || exit
|
8 |
-
|
9 |
-
# 检查Git仓库是否有更新
|
10 |
-
git remote update
|
11 |
-
pwd
|
12 |
-
|
13 |
-
if ! git status -uno | grep 'up to date' > /dev/null; then
|
14 |
-
# 如果有更新,关闭当前运行的服务器
|
15 |
-
pkill -f ChuanhuChatbot.py
|
16 |
-
|
17 |
-
# 拉取最新更改
|
18 |
-
git pull
|
19 |
-
|
20 |
-
# 安装依赖
|
21 |
-
pip3 install -r requirements.txt
|
22 |
-
|
23 |
-
# 重新启动服务器
|
24 |
-
nohup python3 ChuanhuChatbot.py &
|
25 |
-
fi
|
26 |
-
|
27 |
-
# 检查ChuanhuChatbot.py是否在运行
|
28 |
-
if ! pgrep -f ChuanhuChatbot.py > /dev/null; then
|
29 |
-
# 如果没有运行,启动服务器
|
30 |
-
nohup python3 ChuanhuChatbot.py &
|
31 |
-
fi
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DragGan/DragGan-Inversion/PTI/torch_utils/__init__.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
# empty
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DragGan/DragGan-Inversion/PTI/training/__init__.py
DELETED
File without changes
|
spaces/EAraid12/LoRA-DreamBooth-Training-UI/README.md
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: LoRA DreamBooth Training UI
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.16.2
|
8 |
-
python_version: 3.10.9
|
9 |
-
app_file: app.py
|
10 |
-
pinned: false
|
11 |
-
license: mit
|
12 |
-
duplicated_from: lora-library/LoRA-DreamBooth-Training-UI
|
13 |
-
---
|
14 |
-
|
15 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/models/realesrgan_model.py
DELETED
@@ -1,258 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import random
|
3 |
-
import torch
|
4 |
-
from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
|
5 |
-
from basicsr.data.transforms import paired_random_crop
|
6 |
-
from basicsr.models.srgan_model import SRGANModel
|
7 |
-
from basicsr.utils import DiffJPEG, USMSharp
|
8 |
-
from basicsr.utils.img_process_util import filter2D
|
9 |
-
from basicsr.utils.registry import MODEL_REGISTRY
|
10 |
-
from collections import OrderedDict
|
11 |
-
from torch.nn import functional as F
|
12 |
-
|
13 |
-
|
14 |
-
@MODEL_REGISTRY.register()
|
15 |
-
class RealESRGANModel(SRGANModel):
|
16 |
-
"""RealESRGAN Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
|
17 |
-
|
18 |
-
It mainly performs:
|
19 |
-
1. randomly synthesize LQ images in GPU tensors
|
20 |
-
2. optimize the networks with GAN training.
|
21 |
-
"""
|
22 |
-
|
23 |
-
def __init__(self, opt):
|
24 |
-
super(RealESRGANModel, self).__init__(opt)
|
25 |
-
self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts
|
26 |
-
self.usm_sharpener = USMSharp().cuda() # do usm sharpening
|
27 |
-
self.queue_size = opt.get('queue_size', 180)
|
28 |
-
|
29 |
-
@torch.no_grad()
|
30 |
-
def _dequeue_and_enqueue(self):
|
31 |
-
"""It is the training pair pool for increasing the diversity in a batch.
|
32 |
-
|
33 |
-
Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a
|
34 |
-
batch could not have different resize scaling factors. Therefore, we employ this training pair pool
|
35 |
-
to increase the degradation diversity in a batch.
|
36 |
-
"""
|
37 |
-
# initialize
|
38 |
-
b, c, h, w = self.lq.size()
|
39 |
-
if not hasattr(self, 'queue_lr'):
|
40 |
-
assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
|
41 |
-
self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
|
42 |
-
_, c, h, w = self.gt.size()
|
43 |
-
self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
|
44 |
-
self.queue_ptr = 0
|
45 |
-
if self.queue_ptr == self.queue_size: # the pool is full
|
46 |
-
# do dequeue and enqueue
|
47 |
-
# shuffle
|
48 |
-
idx = torch.randperm(self.queue_size)
|
49 |
-
self.queue_lr = self.queue_lr[idx]
|
50 |
-
self.queue_gt = self.queue_gt[idx]
|
51 |
-
# get first b samples
|
52 |
-
lq_dequeue = self.queue_lr[0:b, :, :, :].clone()
|
53 |
-
gt_dequeue = self.queue_gt[0:b, :, :, :].clone()
|
54 |
-
# update the queue
|
55 |
-
self.queue_lr[0:b, :, :, :] = self.lq.clone()
|
56 |
-
self.queue_gt[0:b, :, :, :] = self.gt.clone()
|
57 |
-
|
58 |
-
self.lq = lq_dequeue
|
59 |
-
self.gt = gt_dequeue
|
60 |
-
else:
|
61 |
-
# only do enqueue
|
62 |
-
self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone()
|
63 |
-
self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone()
|
64 |
-
self.queue_ptr = self.queue_ptr + b
|
65 |
-
|
66 |
-
@torch.no_grad()
|
67 |
-
def feed_data(self, data):
|
68 |
-
"""Accept data from dataloader, and then add two-order degradations to obtain LQ images.
|
69 |
-
"""
|
70 |
-
if self.is_train and self.opt.get('high_order_degradation', True):
|
71 |
-
# training data synthesis
|
72 |
-
self.gt = data['gt'].to(self.device)
|
73 |
-
self.gt_usm = self.usm_sharpener(self.gt)
|
74 |
-
|
75 |
-
self.kernel1 = data['kernel1'].to(self.device)
|
76 |
-
self.kernel2 = data['kernel2'].to(self.device)
|
77 |
-
self.sinc_kernel = data['sinc_kernel'].to(self.device)
|
78 |
-
|
79 |
-
ori_h, ori_w = self.gt.size()[2:4]
|
80 |
-
|
81 |
-
# ----------------------- The first degradation process ----------------------- #
|
82 |
-
# blur
|
83 |
-
out = filter2D(self.gt_usm, self.kernel1)
|
84 |
-
# random resize
|
85 |
-
updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
|
86 |
-
if updown_type == 'up':
|
87 |
-
scale = np.random.uniform(1, self.opt['resize_range'][1])
|
88 |
-
elif updown_type == 'down':
|
89 |
-
scale = np.random.uniform(self.opt['resize_range'][0], 1)
|
90 |
-
else:
|
91 |
-
scale = 1
|
92 |
-
mode = random.choice(['area', 'bilinear', 'bicubic'])
|
93 |
-
out = F.interpolate(out, scale_factor=scale, mode=mode)
|
94 |
-
# add noise
|
95 |
-
gray_noise_prob = self.opt['gray_noise_prob']
|
96 |
-
if np.random.uniform() < self.opt['gaussian_noise_prob']:
|
97 |
-
out = random_add_gaussian_noise_pt(
|
98 |
-
out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
|
99 |
-
else:
|
100 |
-
out = random_add_poisson_noise_pt(
|
101 |
-
out,
|
102 |
-
scale_range=self.opt['poisson_scale_range'],
|
103 |
-
gray_prob=gray_noise_prob,
|
104 |
-
clip=True,
|
105 |
-
rounds=False)
|
106 |
-
# JPEG compression
|
107 |
-
jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
|
108 |
-
out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts
|
109 |
-
out = self.jpeger(out, quality=jpeg_p)
|
110 |
-
|
111 |
-
# ----------------------- The second degradation process ----------------------- #
|
112 |
-
# blur
|
113 |
-
if np.random.uniform() < self.opt['second_blur_prob']:
|
114 |
-
out = filter2D(out, self.kernel2)
|
115 |
-
# random resize
|
116 |
-
updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
|
117 |
-
if updown_type == 'up':
|
118 |
-
scale = np.random.uniform(1, self.opt['resize_range2'][1])
|
119 |
-
elif updown_type == 'down':
|
120 |
-
scale = np.random.uniform(self.opt['resize_range2'][0], 1)
|
121 |
-
else:
|
122 |
-
scale = 1
|
123 |
-
mode = random.choice(['area', 'bilinear', 'bicubic'])
|
124 |
-
out = F.interpolate(
|
125 |
-
out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
|
126 |
-
# add noise
|
127 |
-
gray_noise_prob = self.opt['gray_noise_prob2']
|
128 |
-
if np.random.uniform() < self.opt['gaussian_noise_prob2']:
|
129 |
-
out = random_add_gaussian_noise_pt(
|
130 |
-
out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
|
131 |
-
else:
|
132 |
-
out = random_add_poisson_noise_pt(
|
133 |
-
out,
|
134 |
-
scale_range=self.opt['poisson_scale_range2'],
|
135 |
-
gray_prob=gray_noise_prob,
|
136 |
-
clip=True,
|
137 |
-
rounds=False)
|
138 |
-
|
139 |
-
# JPEG compression + the final sinc filter
|
140 |
-
# We also need to resize images to desired sizes. We group [resize back + sinc filter] together
|
141 |
-
# as one operation.
|
142 |
-
# We consider two orders:
|
143 |
-
# 1. [resize back + sinc filter] + JPEG compression
|
144 |
-
# 2. JPEG compression + [resize back + sinc filter]
|
145 |
-
# Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
|
146 |
-
if np.random.uniform() < 0.5:
|
147 |
-
# resize back + the final sinc filter
|
148 |
-
mode = random.choice(['area', 'bilinear', 'bicubic'])
|
149 |
-
out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
|
150 |
-
out = filter2D(out, self.sinc_kernel)
|
151 |
-
# JPEG compression
|
152 |
-
jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
|
153 |
-
out = torch.clamp(out, 0, 1)
|
154 |
-
out = self.jpeger(out, quality=jpeg_p)
|
155 |
-
else:
|
156 |
-
# JPEG compression
|
157 |
-
jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
|
158 |
-
out = torch.clamp(out, 0, 1)
|
159 |
-
out = self.jpeger(out, quality=jpeg_p)
|
160 |
-
# resize back + the final sinc filter
|
161 |
-
mode = random.choice(['area', 'bilinear', 'bicubic'])
|
162 |
-
out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
|
163 |
-
out = filter2D(out, self.sinc_kernel)
|
164 |
-
|
165 |
-
# clamp and round
|
166 |
-
self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.
|
167 |
-
|
168 |
-
# random crop
|
169 |
-
gt_size = self.opt['gt_size']
|
170 |
-
(self.gt, self.gt_usm), self.lq = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size,
|
171 |
-
self.opt['scale'])
|
172 |
-
|
173 |
-
# training pair pool
|
174 |
-
self._dequeue_and_enqueue()
|
175 |
-
# sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue
|
176 |
-
self.gt_usm = self.usm_sharpener(self.gt)
|
177 |
-
self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract
|
178 |
-
else:
|
179 |
-
# for paired training or validation
|
180 |
-
self.lq = data['lq'].to(self.device)
|
181 |
-
if 'gt' in data:
|
182 |
-
self.gt = data['gt'].to(self.device)
|
183 |
-
self.gt_usm = self.usm_sharpener(self.gt)
|
184 |
-
|
185 |
-
def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
|
186 |
-
# do not use the synthetic process during validation
|
187 |
-
self.is_train = False
|
188 |
-
super(RealESRGANModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img)
|
189 |
-
self.is_train = True
|
190 |
-
|
191 |
-
def optimize_parameters(self, current_iter):
|
192 |
-
# usm sharpening
|
193 |
-
l1_gt = self.gt_usm
|
194 |
-
percep_gt = self.gt_usm
|
195 |
-
gan_gt = self.gt_usm
|
196 |
-
if self.opt['l1_gt_usm'] is False:
|
197 |
-
l1_gt = self.gt
|
198 |
-
if self.opt['percep_gt_usm'] is False:
|
199 |
-
percep_gt = self.gt
|
200 |
-
if self.opt['gan_gt_usm'] is False:
|
201 |
-
gan_gt = self.gt
|
202 |
-
|
203 |
-
# optimize net_g
|
204 |
-
for p in self.net_d.parameters():
|
205 |
-
p.requires_grad = False
|
206 |
-
|
207 |
-
self.optimizer_g.zero_grad()
|
208 |
-
self.output = self.net_g(self.lq)
|
209 |
-
|
210 |
-
l_g_total = 0
|
211 |
-
loss_dict = OrderedDict()
|
212 |
-
if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters):
|
213 |
-
# pixel loss
|
214 |
-
if self.cri_pix:
|
215 |
-
l_g_pix = self.cri_pix(self.output, l1_gt)
|
216 |
-
l_g_total += l_g_pix
|
217 |
-
loss_dict['l_g_pix'] = l_g_pix
|
218 |
-
# perceptual loss
|
219 |
-
if self.cri_perceptual:
|
220 |
-
l_g_percep, l_g_style = self.cri_perceptual(self.output, percep_gt)
|
221 |
-
if l_g_percep is not None:
|
222 |
-
l_g_total += l_g_percep
|
223 |
-
loss_dict['l_g_percep'] = l_g_percep
|
224 |
-
if l_g_style is not None:
|
225 |
-
l_g_total += l_g_style
|
226 |
-
loss_dict['l_g_style'] = l_g_style
|
227 |
-
# gan loss
|
228 |
-
fake_g_pred = self.net_d(self.output)
|
229 |
-
l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False)
|
230 |
-
l_g_total += l_g_gan
|
231 |
-
loss_dict['l_g_gan'] = l_g_gan
|
232 |
-
|
233 |
-
l_g_total.backward()
|
234 |
-
self.optimizer_g.step()
|
235 |
-
|
236 |
-
# optimize net_d
|
237 |
-
for p in self.net_d.parameters():
|
238 |
-
p.requires_grad = True
|
239 |
-
|
240 |
-
self.optimizer_d.zero_grad()
|
241 |
-
# real
|
242 |
-
real_d_pred = self.net_d(gan_gt)
|
243 |
-
l_d_real = self.cri_gan(real_d_pred, True, is_disc=True)
|
244 |
-
loss_dict['l_d_real'] = l_d_real
|
245 |
-
loss_dict['out_d_real'] = torch.mean(real_d_pred.detach())
|
246 |
-
l_d_real.backward()
|
247 |
-
# fake
|
248 |
-
fake_d_pred = self.net_d(self.output.detach().clone()) # clone for pt1.9
|
249 |
-
l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True)
|
250 |
-
loss_dict['l_d_fake'] = l_d_fake
|
251 |
-
loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach())
|
252 |
-
l_d_fake.backward()
|
253 |
-
self.optimizer_d.step()
|
254 |
-
|
255 |
-
if self.ema_decay > 0:
|
256 |
-
self.model_ema(decay=self.ema_decay)
|
257 |
-
|
258 |
-
self.log_dict = self.reduce_loss_dict(loss_dict)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/layers_new.py
DELETED
@@ -1,125 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn.functional as F
|
3 |
-
from torch import nn
|
4 |
-
|
5 |
-
from . import spec_utils
|
6 |
-
|
7 |
-
|
8 |
-
class Conv2DBNActiv(nn.Module):
|
9 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
|
10 |
-
super(Conv2DBNActiv, self).__init__()
|
11 |
-
self.conv = nn.Sequential(
|
12 |
-
nn.Conv2d(
|
13 |
-
nin,
|
14 |
-
nout,
|
15 |
-
kernel_size=ksize,
|
16 |
-
stride=stride,
|
17 |
-
padding=pad,
|
18 |
-
dilation=dilation,
|
19 |
-
bias=False,
|
20 |
-
),
|
21 |
-
nn.BatchNorm2d(nout),
|
22 |
-
activ(),
|
23 |
-
)
|
24 |
-
|
25 |
-
def __call__(self, x):
|
26 |
-
return self.conv(x)
|
27 |
-
|
28 |
-
|
29 |
-
class Encoder(nn.Module):
|
30 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
|
31 |
-
super(Encoder, self).__init__()
|
32 |
-
self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ)
|
33 |
-
self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
|
34 |
-
|
35 |
-
def __call__(self, x):
|
36 |
-
h = self.conv1(x)
|
37 |
-
h = self.conv2(h)
|
38 |
-
|
39 |
-
return h
|
40 |
-
|
41 |
-
|
42 |
-
class Decoder(nn.Module):
|
43 |
-
def __init__(
|
44 |
-
self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
|
45 |
-
):
|
46 |
-
super(Decoder, self).__init__()
|
47 |
-
self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
|
48 |
-
# self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
|
49 |
-
self.dropout = nn.Dropout2d(0.1) if dropout else None
|
50 |
-
|
51 |
-
def __call__(self, x, skip=None):
|
52 |
-
x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
|
53 |
-
|
54 |
-
if skip is not None:
|
55 |
-
skip = spec_utils.crop_center(skip, x)
|
56 |
-
x = torch.cat([x, skip], dim=1)
|
57 |
-
|
58 |
-
h = self.conv1(x)
|
59 |
-
# h = self.conv2(h)
|
60 |
-
|
61 |
-
if self.dropout is not None:
|
62 |
-
h = self.dropout(h)
|
63 |
-
|
64 |
-
return h
|
65 |
-
|
66 |
-
|
67 |
-
class ASPPModule(nn.Module):
|
68 |
-
def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False):
|
69 |
-
super(ASPPModule, self).__init__()
|
70 |
-
self.conv1 = nn.Sequential(
|
71 |
-
nn.AdaptiveAvgPool2d((1, None)),
|
72 |
-
Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ),
|
73 |
-
)
|
74 |
-
self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ)
|
75 |
-
self.conv3 = Conv2DBNActiv(
|
76 |
-
nin, nout, 3, 1, dilations[0], dilations[0], activ=activ
|
77 |
-
)
|
78 |
-
self.conv4 = Conv2DBNActiv(
|
79 |
-
nin, nout, 3, 1, dilations[1], dilations[1], activ=activ
|
80 |
-
)
|
81 |
-
self.conv5 = Conv2DBNActiv(
|
82 |
-
nin, nout, 3, 1, dilations[2], dilations[2], activ=activ
|
83 |
-
)
|
84 |
-
self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ)
|
85 |
-
self.dropout = nn.Dropout2d(0.1) if dropout else None
|
86 |
-
|
87 |
-
def forward(self, x):
|
88 |
-
_, _, h, w = x.size()
|
89 |
-
feat1 = F.interpolate(
|
90 |
-
self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
|
91 |
-
)
|
92 |
-
feat2 = self.conv2(x)
|
93 |
-
feat3 = self.conv3(x)
|
94 |
-
feat4 = self.conv4(x)
|
95 |
-
feat5 = self.conv5(x)
|
96 |
-
out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
|
97 |
-
out = self.bottleneck(out)
|
98 |
-
|
99 |
-
if self.dropout is not None:
|
100 |
-
out = self.dropout(out)
|
101 |
-
|
102 |
-
return out
|
103 |
-
|
104 |
-
|
105 |
-
class LSTMModule(nn.Module):
|
106 |
-
def __init__(self, nin_conv, nin_lstm, nout_lstm):
|
107 |
-
super(LSTMModule, self).__init__()
|
108 |
-
self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0)
|
109 |
-
self.lstm = nn.LSTM(
|
110 |
-
input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True
|
111 |
-
)
|
112 |
-
self.dense = nn.Sequential(
|
113 |
-
nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU()
|
114 |
-
)
|
115 |
-
|
116 |
-
def forward(self, x):
|
117 |
-
N, _, nbins, nframes = x.size()
|
118 |
-
h = self.conv(x)[:, 0] # N, nbins, nframes
|
119 |
-
h = h.permute(2, 0, 1) # nframes, N, nbins
|
120 |
-
h, _ = self.lstm(h)
|
121 |
-
h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins
|
122 |
-
h = h.reshape(nframes, N, 1, nbins)
|
123 |
-
h = h.permute(1, 2, 3, 0)
|
124 |
-
|
125 |
-
return h
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|