Commit
·
4f1bd50
1
Parent(s):
97b36c7
Update parquet files (step 95 of 397)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bin Bulaye Baarati full movie in hindi hd 1080p download torrent Where to find the best links and sources for this film.md +0 -138
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Autodesk 123D Design 64-Bit in Minutes.md +0 -45
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (xmlbar video downloader vip crack) The Ultimate Solution for Online Video Lovers.md +0 -133
- spaces/1gistliPinn/ChatGPT4/Examples/A Great Big World When The Morning Comes Download Zip.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/ConvertXtoDVD V5.3.0.9 Installer -nelly- Serial Key.md +0 -7
- spaces/1gistliPinn/ChatGPT4/Examples/Download Auto Macro Recorder With Crack.md +0 -8
- spaces/1gistliPinn/ChatGPT4/Examples/Download Pes 2010 Torent Tpb 23.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/app.py +0 -18
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Gas Station Simulator Mod Apk and Experience the Real Life of a Gas Station Owner.md +0 -125
- spaces/1phancelerku/anime-remove-background/Enjoy Love Convention Mod APK with Unlimited Money and Gems.md +0 -98
- spaces/9752isme/ChatGPT4/README.md +0 -14
- spaces/ADOPLE/Adopleai-DocumentQA/README.md +0 -12
- spaces/ADobrovsky/Plant_Disease_Classification_Project/app.py +0 -48
- spaces/AIConsultant/MusicGen/audiocraft/grids/diffusion/4_bands_base_32khz.py +0 -27
- spaces/AIFILMS/generate_human_motion/pyrender/pyrender/sampler.py +0 -102
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/CLAP/CLAPWrapper.py +0 -257
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py +0 -98
- spaces/Ababababababbababa/Ashaar/langs.py +0 -59
- spaces/Abdllh/poetry2023/app.py +0 -53
- spaces/AbeShinzo0708/AI_Kishida_Fumio_speaker/hooks/hook-librosa.py +0 -3
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/gridalign-plugin.js +0 -26
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/Bejeweled.d.ts +0 -192
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/grid/Grid.js +0 -60
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/index.d.ts +0 -12
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/ninepatch/NinePatch.d.ts +0 -2
- spaces/AiMimicry/sovits-models/modules/__init__.py +0 -0
- spaces/Amon1/ChatGPTForAcadamic/project_self_analysis.md +0 -175
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/train_dreambooth_lora.py +0 -1418
- spaces/Andy1621/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/fpn_uniformer.py +0 -35
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/one_click_installer_check.py +0 -8
- spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/diffusionmodules/upscaling.py +0 -81
- spaces/Arnx/MusicGenXvAKN/tests/quantization/test_vq.py +0 -18
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/_structures.py +0 -61
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/adapters.py +0 -584
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/msvc9compiler.py +0 -832
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_collections.py +0 -30
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/version.py +0 -6
- spaces/Audio-AGI/WavJourney/utils.py +0 -82
- spaces/Bart92/RVC_HF/mdx.py +0 -228
- spaces/Beasto/Day_to_Night_Cyclegan/README.md +0 -13
- spaces/BetterAPI/BetterChat/src/lib/updateSettings.ts +0 -27
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/temp_dir.py +0 -246
- spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/download.py +0 -790
- spaces/CVPR/LIVE/thrust/thrust/detail/functional/argument.h +0 -75
- spaces/CVPR/LIVE/thrust/thrust/memory.h +0 -547
- spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/generate.h +0 -23
- spaces/CVPR/LIVE/vector.h +0 -817
- spaces/CVPR/WALT/mmdet/datasets/dataset_wrappers.py +0 -282
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bin Bulaye Baarati full movie in hindi hd 1080p download torrent Where to find the best links and sources for this film.md
DELETED
@@ -1,138 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Bin Bulaye Baraati: A Comedy of Errors</h1>
|
3 |
-
<p>Bin Bulaye Baraati is a 2011 Hindi comedy film directed by Chandrakant Singh and starring Aftab Shivdasani, Priyanka Kothari, Rajpal Yadav, Om Puri, Shakti Kapoor and many more. The film is a hilarious story of a group of misfits who get involved in a series of mishaps while trying to escape from a gangster and the police.</p>
|
4 |
-
<p>The film was released on June 17, 2011 and received mixed reviews from critics and audiences. However, it became a cult hit among comedy lovers who enjoyed its slapstick humor, witty dialogues and funny situations. The film also features some catchy songs and dance numbers that add to its entertainment value.</p>
|
5 |
-
<h2>Bin Bulaye Baarati full movie in hindi hd 1080p download torrent</h2><br /><p><b><b>Download File</b> ✒ ✒ ✒ <a href="https://byltly.com/2uKxgx">https://byltly.com/2uKxgx</a></b></p><br /><br />
|
6 |
-
<p>If you are looking for a fun-filled movie that will make you laugh out loud, then Bin Bulaye Baraati is a perfect choice for you. In this article, we will tell you how you can download Bin Bulaye Baraati full movie in HD 1080p, watch it online or enjoy it with your friends and family.</p>
|
7 |
-
<h2>How to Download Bin Bulaye Baraati Full Movie in HD 1080p</h2>
|
8 |
-
<p>One of the best ways to enjoy Bin Bulaye Baraati is to download it in HD 1080p quality. This way, you can watch the movie in high definition and appreciate its visuals and sound effects. You can also watch the movie offline at your convenience and without any interruptions.</p>
|
9 |
-
<p>There are many sites that offer Bin Bulaye Baraati full movie download in HD 1080p. However, not all of them are reliable or safe. Some of them may contain viruses, malware or spyware that can harm your device or compromise your privacy. Some of them may also have low-quality or incomplete downloads that can ruin your viewing experience.</p>
|
10 |
-
<p>Therefore, you need to be careful and choose a trusted site that provides Bin Bulaye Baraati full movie download in HD 1080p. Here are some of the best sites that we recommend:</p>
|
11 |
-
<ul>
|
12 |
-
<li><strong>Vegamovies.nl</strong>: This site offers Bin Bulaye Baraati full movie download in HD 1080p in both Hindi and English languages. The site also provides other Bollywood movies, Hollywood movies, web series and TV shows in various genres and formats. The site has a simple interface and fast download speed. You can download Bin Bulaye Baraati full movie from this site by following these steps:</li>
|
13 |
-
<ol>
|
14 |
-
<li>Go to <code>https://vegamovies.nl/</code> and search for Bin Bulaye Baraati in the search bar.</li>
|
15 |
-
<li>Select the movie from the results and click on the download button.</li>
|
16 |
-
<li>Choose your preferred quality (HD 1080p) and language (Hindi or English).</li>
|
17 |
-
<li>Click on the download link and wait for the file to be downloaded.</li>
|
18 |
-
</ol>
|
19 |
-
<li><strong>New.c.mi.com</strong>: This site offers Bin Bulaye Baraati full movie download in HD 1080p as well as other resolutions such as 720p, 480p and 360p. The site also offers other Hindi movies, Hollywood movies, Tamil movies, Telugu movies and more. The site is part of Xiaomi Community, which is a platform for Xiaomi fans to share their opinions and experiences. You can download Bin Bulaye Baraati full movie from this site by following these steps:</li>
|
20 |
-
<ol>
|
21 |
-
<li>Go to <code>https://new.c.mi.com/ng/post/30579/Bin_Bulaye_Baarati_1_Full_REPACK_Movie_Hd_1080p</code> and click on the download link.</li>
|
22 |
-
<li>Select your preferred resolution (HD 1080p) and click on the download button.</li>
|
23 |
-
<li>Wait for the file to be downloaded.</li>
|
24 |
-
</ol>
|
25 |
-
<li><strong>Sway.office.com</strong>: This site offers Bin Bulaye Baraati full movie download in HD 1080p as well as other resolutions such as 720p, 480p and 360p. The site also offers other Hindi movies, Hollywood movies, Bollywood movies and more. The site is part of Microsoft Sway, which is a digital storytelling tool that lets you create interactive presentations, newsletters, reports and more. You can download Bin Bulaye Baraati full movie from this site by following these steps:</li>
|
26 |
-
<ol>
|
27 |
-
<li>Go to <code>https://sway.office.com/6CUxiia6rBjtS1t5</code> and click on the download link.</li>
|
28 |
-
<li>Select your preferred resolution (HD 1080p) and click on the download button.</li>
|
29 |
-
<li>Wait for the file to be downloaded.</li>
|
30 |
-
</ol>
|
31 |
-
</ul>
|
32 |
-
<p>To download Bin Bulaye Baraati full movie using a torrent client, you need to have a torrent software installed on your device such as BitTorrent, uTorrent or Vuze. You also need to find a torrent file or magnet link that contains Bin Bulaye Baraati full movie in HD 1080p. You can find such files or links on various torrent sites such as The Pirate Bay, Kickass Torrents or RARBG. However, be aware that downloading movies from torrent sites may be illegal or risky in some countries or regions. Therefore, use a VPN service to protect your identity and data while downloading movies from torrent sites.</p>
|
33 |
-
<p>To download Bin Bulaye Baraati full movie using a torrent client, follow these steps:</p>
|
34 |
-
<ol>
|
35 |
-
<li>Go to any torrent site that has Bin Bulaye Baraati full movie in HD 1080p such as The Pirate Bay, Kickass Torrents or RARBG.</li>
|
36 |
-
<li>Search for Bin Bulaye Baraati in the search bar.</li>
|
37 |
-
<li>Select the torrent file or magnet link that has Bin Bulaye Baraati full movie in HD 1080p.</li>
|
38 |
-
<li>Open the torrent file or magnet link with your torrent software.</li>
|
39 |
-
<li>Select your desired location for saving the file and start downloading.</li>
|
40 |
-
</ol>
|
41 |
-
<h2>How to Watch Bin Bulaye Baraati Full Movie Online</h2>
|
42 |
-
<p>If you don't want to download Bin Bulaye Baraati full movie in HD 1080p, you can also watch it online on various streaming platforms. This way, you can save your storage space and bandwidth and watch the movie anytime and anywhere with an internet connection. You can also enjoy other features such as subtitles, captions, ratings, reviews and more.</p>
|
43 |
-
<p>There are many platforms that offer Bin Bulaye Baraati full movie online streaming. However, not all of them are legal or safe. Some of them may contain ads, pop-ups or malware that can disrupt your viewing experience or harm your device or data. Some of them may also have low-quality or incomplete streams that can spoil your enjoyment.</p>
|
44 |
-
<p>Bin Bulaye Baarati hindi movie hd torrent download<br />
|
45 |
-
Download Bin Bulaye Baarati full movie in 1080p hindi<br />
|
46 |
-
Bin Bulaye Baarati hd hindi movie torrent link<br />
|
47 |
-
How to download Bin Bulaye Baarati movie in hindi hd<br />
|
48 |
-
Bin Bulaye Baarati full movie 1080p hindi torrent magnet<br />
|
49 |
-
Watch Bin Bulaye Baarati hindi movie online hd<br />
|
50 |
-
Bin Bulaye Baarati hindi movie download 1080p free<br />
|
51 |
-
Bin Bulaye Baarati full movie torrent download in hd hindi<br />
|
52 |
-
Bin Bulaye Baarati hd movie hindi torrent file<br />
|
53 |
-
Bin Bulaye Baarati movie download hindi 1080p quality<br />
|
54 |
-
Bin Bulaye Baarati hindi movie 1080p torrent download site<br />
|
55 |
-
Bin Bulaye Baarati full movie in hd hindi download link<br />
|
56 |
-
Bin Bulaye Baarati movie torrent download hd hindi version<br />
|
57 |
-
Bin Bulaye Baarati hindi movie hd 1080p free download<br />
|
58 |
-
Bin Bulaye Baarati full movie in hindi hd torrent online<br />
|
59 |
-
Bin Bulaye Baarati movie download torrent in hindi hd 1080p<br />
|
60 |
-
Bin Bulaye Baarati hd hindi movie free torrent download<br />
|
61 |
-
Bin Bulaye Baarati full movie 1080p hindi torrent online watch<br />
|
62 |
-
Bin Bulaye Baarati movie hd hindi torrent download free<br />
|
63 |
-
Bin Bulaye Baarati full movie in hindi 1080p download link<br />
|
64 |
-
Bin Bulaye Baarati hindi movie torrent download 1080p hd quality<br />
|
65 |
-
Bin Bulaye Baarati full movie in hd hindi torrent magnet link<br />
|
66 |
-
Bin Bulaye Baarati movie download in hindi hd 1080p free<br />
|
67 |
-
Bin Bulaye Baarati full movie torrent download in hindi hd quality<br />
|
68 |
-
Bin Bulaye Baarati hd movie in hindi torrent download link<br />
|
69 |
-
Bin Bulaye Baarati full movie in hindi 1080p torrent download free<br />
|
70 |
-
Bin Bulaye Baarati movie hd 1080p hindi torrent file download<br />
|
71 |
-
Bin Bulaye Baarati full movie in hd hindi free download torrent<br />
|
72 |
-
Bin Bulaye Baarati movie download 1080p hd hindi torrent magnet<br />
|
73 |
-
Bin Bulaye Baarati full movie in hindi hd online watch torrent<br />
|
74 |
-
Bin Bulaye Baarati movie torrent download in hd 1080p hindi quality<br />
|
75 |
-
Bin Bulaye Baarati full movie in hindi 1080p free download link<br />
|
76 |
-
Bin Bulaye Baarati hd movie torrent download in hindi free<br />
|
77 |
-
Bin Bulaye Baarati full movie online watch in hd 1080p hindi<br />
|
78 |
-
Bin Bulaye Baarati movie free download in hd 1080p hindi torrent link<br />
|
79 |
-
Bin Bulaye Baarati full movie in hindi torrent download site 1080p quality<br />
|
80 |
-
Bin Bulaye Baarati hd 1080p movie in hindi free torrent file<br />
|
81 |
-
Bin Bulaye Baarati full movie watch online in hd 1080p hindi torrent link<br />
|
82 |
-
Bin Bulaye Baarati movie in hd 1080p hindi torrent magnet download free<br />
|
83 |
-
Bin Bulaye Baarati full movie in hindi free download site 1080p quality<br />
|
84 |
-
Bin Bulaye Baarati hd 1080p movie torrent file download in hindi free<br />
|
85 |
-
Bin Bulaye Baarati full movie online watch free in hd 1080p hindi quality<br />
|
86 |
-
Bin Bulaye Baarati movie in hd 1080p hindi free download link torrent magnet</p>
|
87 |
-
<p>Therefore, you need to be careful and choose a legal and safe platform that provides Bin Bulaye Baraati full movie online streaming. Here are some of the best platforms that we recommend:</p>
|
88 |
-
<ul>
|
89 |
-
<li><strong>Zee5.com</strong>: This platform offers Bin Bulaye Baraati full movie online streaming in HD quality along with other Bollywood movies, Hollywood movies, regional movies, web series, TV shows, live TV channels and more. The platform has a user-friendly interface and supports multiple devices such as smartphones, tablets, laptops, desktops, smart TVs and more. You can watch Bin Bulaye Baraati full movie online on Zee5.com by following these steps:</li>
|
90 |
-
<ol>
|
91 |
-
<li>Go to <code>https://www.zee5.com/</code> and sign up for a Zee5 premium subscription plan. You can choose from three plans: Mobile, Premium HD and Premium 4K. The Mobile plan costs Rs 399 for a year and supports one mobile screen with a maximum video quality output of 720p. The Premium HD plan costs Rs 699 for a year and provides access to content streaming on TV, laptops and mobile devices, up to 2 screens at a time, with Dolby 5.1 audio and FHD 1080p resolution. The Premium 4K plan costs Rs 1249 for a year and offers content streaming on TV, laptops and mobile devices, up to 4 screens at a time, with Dolby Atmos audio and UHD 2160p resolution .</li>
|
92 |
-
<li>After signing up, log in to your Zee5 account and search for Bin Bulaye Baraati in the search bar.</li>
|
93 |
-
<li>Select the movie from the results and click on the play button.</li>
|
94 |
-
<li>Enjoy watching Bin Bulaye Baraati full movie online in HD quality.</li>
|
95 |
-
</ol>
|
96 |
-
<li><strong>YouTube.com</strong>: This platform offers Bin Bulaye Baraati full movie online streaming in HD quality along with other Bollywood movies, Hollywood movies, regional movies, web series, TV shows, music videos and more. The platform has a user-friendly interface and supports multiple devices such as smartphones, tablets, laptops, desktops, smart TVs and more. You can watch Bin Bulaye Baraati full movie online on YouTube.com by following these steps:</li>
|
97 |
-
<ol>
|
98 |
-
<li>Go to <code>https://www.youtube.com/</code> and sign in to your YouTube account. If you don't have one, you can create one for free.</li>
|
99 |
-
<li>Search for Bin Bulaye Baraati in the search bar.</li>
|
100 |
-
<li>Select the movie from the results and click on the play button.</li>
|
101 |
-
<li>Enjoy watching Bin Bulaye Baraati full movie online in HD quality.</li>
|
102 |
-
</ol>
|
103 |
-
<li><strong>Funbizzbd.blogspot.com</strong>: This site offers Bin Bulaye Baraati full movie online streaming in HD quality along with other Hindi movies, Hollywood movies, Tamil movies, Telugu movies and more. The site also provides movie reviews, news, gossips, trailers and more. The site has a simple interface and fast streaming speed. You can watch Bin Bulaye Baraati full movie online on Funbizzbd.blogspot.com by following these steps:</li>
|
104 |
-
<ol>
|
105 |
-
<li>Go to <code>https://funbizzbd.blogspot.com/2012/03/bin-bulaye-baraati-2011-hindi-movie.html</code> and scroll down to the bottom of the page.</li>
|
106 |
-
<li>Click on the link that says "Click Here To Download Bin Bulaye Baraati (2011) Full Movie".</li>
|
107 |
-
<li>A new tab will open with a video player. Click on the play button.</li>
|
108 |
-
<li>Enjoy watching Bin Bulaye Baraati full movie online in HD quality.</li>
|
109 |
-
</ol>
|
110 |
-
</ul>
|
111 |
-
<h2>How to Enjoy Bin Bulaye Baraati Full Movie with Friends and Family</h2>
|
112 |
-
<p>If you want to make your movie-watching experience more fun and memorable, you can enjoy Bin Bulaye Baraati full movie with your friends and family. Here are some tips on how to do that:</p>
|
113 |
-
<ul>
|
114 |
-
<li><strong>Plan a movie night with Bin Bulaye Baraati</strong>: You can invite your friends and family over to your place or go to their place and watch Bin Bulaye Baraati together. You can prepare some snacks and drinks, set up a comfortable seating arrangement, dim the lights and turn up the volume. You can also play some games or quizzes related to the movie before or after watching it.</li>
|
115 |
-
<li><strong>Do some fun activities before and after watching the movie</strong>: You can make your movie night more exciting by doing some fun activities before and after watching Bin Bulaye Baraati. For example, you can dress up like the characters from the movie, mimic their dialogues or actions, sing or dance along with the songs from the movie, recreate some scenes from the movie or make your own spoof version of the movie.</li>
|
116 |
-
<li><strong>Share your thoughts and reviews on the movie with others</strong>: You can express your opinions and feelings about Bin Bulaye Baraati with your friends and family after watching it. You can also share your thoughts and reviews on the movie with others online. You can post your comments or ratings on social media platforms such as Facebook, Twitter or Instagram. You can also write a blog post or make a video review of the movie and upload it on platforms such as WordPress, Blogger or YouTube.</li>
|
117 |
-
</ul>
|
118 |
-
<h1>Conclusion</h1>
|
119 |
-
<p>In conclusion, Bin Bulaye Baraati is a hilarious comedy film that will make you laugh out loud with its slapstick humor, witty dialogues and funny situations. The film also features some catchy songs and dance numbers that add to its entertainment value. If you are looking for a fun-filled movie that will make you happy, then Bin Bulaye Baraati is a perfect choice for you.</p>
|
120 |
-
<p>In this article, we have told you how you can download Bin Bulaye Baraati full movie in HD 1080p, watch it online or enjoy it with your friends and family. We have also provided you with some of the best sites and platforms that offer Bin Bulaye Baraati full movie download or streaming in HD quality. We hope you found this article helpful and informative.</p>
|
121 |
-
<p>If you liked this article, please share it with your friends and family who are also interested in watching Bin Bulaye Baraati full movie. Also, don't forget to leave your feedback or suggestions in the comments section below. We would love to hear from you.</p>
|
122 |
-
<p>Thank you for reading this article. Have a great day!</p>
|
123 |
-
<h2>Frequently Asked Questions (FAQs)</h2>
|
124 |
-
<ol>
|
125 |
-
<li><strong>What is the genre of Bin Bulaye Baraati?</strong></li>
|
126 |
-
<p>Bin Bulaye Baraati is a comedy film that belongs to the subgenre of comedy of errors.</p>
|
127 |
-
<li><strong>Who are the main actors in Bin Bulaye Baraati?</strong></li>
|
128 |
-
<p>The main actors in Bin Bulaye Baraati are Aftab Shivdasani, Priyanka Kothari, Rajpal Yadav, Om Puri, Shakti Kapoor and many more.</p>
|
129 |
-
<li><strong>Who is the director of Bin Bulaye Baraati?</strong></li>
|
130 |
-
<p>The director of Bin Bulaye Baraati is Chandrakant Singh.</p>
|
131 |
-
<li><strong>When was Bin Bulaye Baraati released?</strong></li>
|
132 |
-
<p>Bin Bulaye Baraati was released on June 17, 2011.</p>
|
133 |
-
<li><strong>How long is Bin Bulaye Baraati?</strong></li>
|
134 |
-
<p>Bin Bulaye Baraati is 2 hours and 18 minutes long.</p>
|
135 |
-
</ol>
|
136 |
-
</p> 0a6ba089eb<br />
|
137 |
-
<br />
|
138 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Autodesk 123D Design 64-Bit in Minutes.md
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download and Install Autodesk 123D Design 64-Bit for Windows</h1>
|
3 |
-
<p>Autodesk 123D Design is a free and powerful 3D modeling software that lets you create and edit 3D designs with ease. You can use it to design anything from simple shapes to complex objects, and prepare them for 3D printing or fabrication. Autodesk 123D Design is also compatible with other Autodesk products, such as ReCap, Fusion 360, and Tinkercad.</p>
|
4 |
-
<h2>123d download 64 bit</h2><br /><p><b><b>Download File</b> ★★★ <a href="https://byltly.com/2uKwRR">https://byltly.com/2uKwRR</a></b></p><br /><br />
|
5 |
-
<p>If you want to download and install Autodesk 123D Design 64-bit for Windows, you will need to follow these steps:</p>
|
6 |
-
<ol>
|
7 |
-
<li>Go to <a href="https://archive.org/details/123-d-design-win64-installer">https://archive.org/details/123-d-design-win64-installer</a> and click on the "DOWNLOAD OPTIONS" button on the right side of the page.</li>
|
8 |
-
<li>Select the "ZIP" option and wait for the download to finish. The file size is about 371 MB.</li>
|
9 |
-
<li>Once the download is done, right-click on the .zip file and extract it using a program like 7-Zip or WinRAR.</li>
|
10 |
-
<li>Double-click on the extracted folder and run the "Setup.exe" file as an administrator.</li>
|
11 |
-
<li>Follow the instructions on the screen to complete the installation process. You will need to accept the license agreement, choose a destination folder, and select the components you want to install.</li>
|
12 |
-
<li>Once the installation is finished, you can launch Autodesk 123D Design from your desktop or start menu.</li>
|
13 |
-
</ol>
|
14 |
-
<p>Congratulations! You have successfully downloaded and installed Autodesk 123D Design 64-bit for Windows. You can now start creating your own 3D models and projects with this amazing software.</p>
|
15 |
-
|
16 |
-
<h2>What are the Features of Autodesk 123D Design?</h2>
|
17 |
-
<p>Autodesk 123D Design is not just a simple 3D modeling software. It has many features that make it stand out from other software in the same category. Here are some of them:</p>
|
18 |
-
<ul>
|
19 |
-
<li><b>Easy and intuitive interface:</b> You can easily create and edit 3D models using smart tools, such as dynamic push-pull, smart patterning, symmetry, shelling, and edge tweaking. You can also access a library of ready-made parts and components to speed up your design process.</li>
|
20 |
-
<li><b>Real-time rendering and simulation:</b> You can see how your 3D model looks like in real-time, with realistic lighting and shadows. You can also simulate motion and stress testing to check the functionality and durability of your design.</li>
|
21 |
-
<li><b>Multiple export options:</b> You can export your 3D model in various formats, such as STL, OBJ, DWG, DXF, and more. You can also send your model directly to 3D printing services or connect to Makerbot 3D printers.</li>
|
22 |
-
<li><b>Integration with other Autodesk products:</b> You can import and export your 3D model to other Autodesk products, such as ReCap, Fusion 360, and Tinkercad. You can also access online tutorials and community forums to learn more about 3D design and get help from other users.</li>
|
23 |
-
</ul>
|
24 |
-
<h2>What are the Requirements of Autodesk 123D Design?</h2>
|
25 |
-
<p>If you want to run Autodesk 123D Design on your Windows PC, you will need to meet these minimum requirements:</p>
|
26 |
-
<table>
|
27 |
-
<tr><td>OS</td><td>Windows 7 64-bit / Windows 8 64-bit / Windows 10 64-bit</td></tr>
|
28 |
-
<tr><td>Processor</td><td>Dual core processor</td></tr>
|
29 |
-
<tr><td>Memory</td><td>4 GB RAM</td></tr>
|
30 |
-
<tr><td>Graphics</td><td>Dedicated graphics card with DirectX 10 support</td></tr>
|
31 |
-
<tr><td>Storage</td><td>1 GB available space</td></tr>
|
32 |
-
<tr><td>Internet connection</td><td>Required for installation and online features</td></tr>
|
33 |
-
</table>
|
34 |
-
<p>If you want to run Autodesk 123D Design on higher settings, you will need to meet these recommended requirements:</p>
|
35 |
-
<p></p>
|
36 |
-
<table>
|
37 |
-
<tr><td>OS</td><td>Windows 7 64-bit / Windows 8 64-bit / Windows 10 64-bit</td></tr>
|
38 |
-
<tr><td>Processor</td><td>Quad core processor</td></tr>
|
39 |
-
<tr><td>Memory</td><td>8 GB RAM</td></tr>
|
40 |
-
<tr><td>Graphics</td><td>Dedicated graphics card with DirectX 11 support</td></tr>
|
41 |
-
<tr><td>Storage</td><td>2 GB available space</td></tr>
|
42 |
-
<tr><td>Internet connection</td><td>Required for installation and online features</td></tr>
|
43 |
-
</table></p> ddb901b051<br />
|
44 |
-
<br />
|
45 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (xmlbar video downloader vip crack) The Ultimate Solution for Online Video Lovers.md
DELETED
@@ -1,133 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>HD Online Player (xmlbar video downloader vip crack)</h1>
|
3 |
-
<p>Do you want to watch and download videos from various websites in high definition? Do you want to enjoy unlimited downloads without any restrictions or ads? If yes, then you need HD Online Player (xmlbar video downloader vip crack). This is a powerful combination of a video player and a video downloader that can help you access and save any online video you want. In this article, we will explain what HD Online Player, xmlbar video downloader, and vip crack are, and how to use them to download videos with ease.</p>
|
4 |
-
<h2>HD Online Player (xmlbar video downloader vip crack)</h2><br /><p><b><b>Download Zip</b> ✅ <a href="https://byltly.com/2uKxAw">https://byltly.com/2uKxAw</a></b></p><br /><br />
|
5 |
-
<h2>What is HD Online Player?</h2>
|
6 |
-
<p>HD Online Player is a free software that allows you to watch online videos in high quality. It supports various formats, such as MP4, MKV, AVI, FLV, WMV, etc. It also supports subtitles, playlists, and multiple languages. You can use HD Online Player to watch videos from YouTube, Vimeo, Dailymotion, Facebook, and other popular websites. You can also use it to play local videos on your computer.</p>
|
7 |
-
<h3>Features of HD Online Player</h3>
|
8 |
-
<ul>
|
9 |
-
<li>It has a simple and user-friendly interface that makes it easy to navigate and control.</li>
|
10 |
-
<li>It has a built-in browser that lets you search and browse online videos without leaving the app.</li>
|
11 |
-
<li>It has a smart download manager that can resume interrupted downloads and manage multiple downloads at once.</li>
|
12 |
-
<li>It has a screen capture function that can record any part of the screen or the entire screen as a video or an image.</li>
|
13 |
-
<li>It has a media converter function that can convert videos to different formats and resolutions.</li>
|
14 |
-
</ul>
|
15 |
-
<h3>How to use HD Online Player</h3>
|
16 |
-
<ol>
|
17 |
-
<li>Download and install HD Online Player from its official website or from a trusted source.</li>
|
18 |
-
<li>Launch HD Online Player and click on the "Online" tab to access the built-in browser.</li>
|
19 |
-
<li>Type in the website address or the keyword of the video you want to watch in the search bar and press enter.</li>
|
20 |
-
<li>Select the video from the search results and click on it to start playing.</li>
|
21 |
-
<li>You can adjust the volume, brightness, playback speed, aspect ratio, and other settings by using the buttons on the bottom or by right-clicking on the screen.</li>
|
22 |
-
<li>You can also add subtitles, change languages, create playlists, and switch between full-screen and windowed mode by using the buttons on the top or by right-clicking on the screen.</li>
|
23 |
-
</ol>
|
24 |
-
<h2>What is xmlbar video downloader?</h2>
|
25 |
-
<p>xmlbar video downloader is a free software that allows you to download online videos from various websites. It supports more than 1000 websites, such as YouTube, Vimeo, Dailymotion, Facebook, Instagram, TikTok, etc. It also supports various formats, such as MP4, FLV, 3GP, WEBM, etc. You can use xmlbar video downloader to download videos in different resolutions, from 144p to 4K.</p>
|
26 |
-
<h3>Features of xmlbar video downloader</h3>
|
27 |
-
<ul>
|
28 |
-
<li>It has a simple and user-friendly interface that makes it easy to use.</li>
|
29 |
-
<li>It has a batch download function that can download multiple videos at once.</li>
|
30 |
-
<li>It has a clipboard monitor function that can automatically detect and download videos from copied URLs.</li>
|
31 |
-
<li>It has a drag-and-drop function that can download videos by dragging them from your browser or other sources.</li>
|
32 |
-
<li>It has a preview function that can play downloaded videos within the app.</li>
|
33 |
-
</ul>
|
34 |
-
<h3>How to use xmlbar video downloader</h3>
|
35 |
-
<ol>
|
36 |
-
<li>Download and install xmlbar video downloader from its official website or from a trusted source.</li>
|
37 |
-
<li>Launch xmlbar video downloader and click on the "Add URL" button on the top left corner.</li>
|
38 |
-
<li>Paste the URL of the video you want to download into the pop-up window and click "OK".</li>
|
39 |
-
<li>Select the output format and quality from the drop-down menu and click "Download".</li>
|
40 |
-
<li>You can also drag-and-drop videos from your browser or other sources into the app window to start downloading.</li>
|
41 |
-
<li>You can view the download progress, pause or resume downloads, delete downloads, and open downloaded files by using the buttons on the right side of each item.</li>
|
42 |
-
</ol>
|
43 |
-
<h2>What is vip crack?</h2>
|
44 |
-
<p>vip crack is a special code that can unlock all the features and functions of HD Online Player and xmlbar video downloader. With vip crack, you can enjoy unlimited downloads without any restrictions or ads. You can also access more websites, formats, resolutions, and options. vip crack is not available for free. You have to pay a certain amount of money to get it. However, there are some ways to get vip crack for free or at a lower price.</p>
|
45 |
-
<p>HD Online Player (xmlbar video downloader vip crack) free download<br />
|
46 |
-
HD Online Player (xmlbar video downloader vip crack) for Windows 10<br />
|
47 |
-
HD Online Player (xmlbar video downloader vip crack) review<br />
|
48 |
-
HD Online Player (xmlbar video downloader vip crack) alternative<br />
|
49 |
-
HD Online Player (xmlbar video downloader vip crack) tutorial<br />
|
50 |
-
How to use HD Online Player (xmlbar video downloader vip crack)<br />
|
51 |
-
How to install HD Online Player (xmlbar video downloader vip crack)<br />
|
52 |
-
How to get HD Online Player (xmlbar video downloader vip crack) for free<br />
|
53 |
-
How to crack HD Online Player (xmlbar video downloader vip crack)<br />
|
54 |
-
How to download videos with HD Online Player (xmlbar video downloader vip crack)<br />
|
55 |
-
Best settings for HD Online Player (xmlbar video downloader vip crack)<br />
|
56 |
-
Benefits of HD Online Player (xmlbar video downloader vip crack)<br />
|
57 |
-
Features of HD Online Player (xmlbar video downloader vip crack)<br />
|
58 |
-
Comparison of HD Online Player (xmlbar video downloader vip crack) and other video downloaders<br />
|
59 |
-
Pros and cons of HD Online Player (xmlbar video downloader vip crack)<br />
|
60 |
-
Tips and tricks for HD Online Player (xmlbar video downloader vip crack)<br />
|
61 |
-
Troubleshooting HD Online Player (xmlbar video downloader vip crack)<br />
|
62 |
-
FAQs about HD Online Player (xmlbar video downloader vip crack)<br />
|
63 |
-
Testimonials of HD Online Player (xmlbar video downloader vip crack) users<br />
|
64 |
-
Customer support for HD Online Player (xmlbar video downloader vip crack)<br />
|
65 |
-
HD Online Player (xmlbar video downloader vip crack) vs Media Lounge APK<br />
|
66 |
-
HD Online Player (xmlbar video downloader vip crack) for Mac<br />
|
67 |
-
HD Online Player (xmlbar video downloader vip crack) for Android<br />
|
68 |
-
HD Online Player (xmlbar video downloader vip crack) for iOS<br />
|
69 |
-
HD Online Player (xmlbar video downloader vip crack) for Linux<br />
|
70 |
-
HD Online Player (xmlbar video downloader vip crack) for Chromebook<br />
|
71 |
-
HD Online Player (xmlbar video downloader vip crack) for Firestick<br />
|
72 |
-
HD Online Player (xmlbar video downloader vip crack) for Roku<br />
|
73 |
-
HD Online Player (xmlbar video downloader vip crack) for Smart TV<br />
|
74 |
-
HD Online Player (xmlbar video downloader vip crack) for Xbox One<br />
|
75 |
-
HD Online Player (xmlbar video downloader vip crack) for PS4<br />
|
76 |
-
HD Online Player (xmlbar video downloader vip crack) for PC<br />
|
77 |
-
HD Online Player (xmlbar video downloader vip crack) for laptop<br />
|
78 |
-
HD Online Player (xmlbar video downloader vip crack) for tablet<br />
|
79 |
-
HD Online Player (xmlbar video downloader vip crack) for phone<br />
|
80 |
-
Download YouTube videos with HD Online Player (xmlbar video downloader vip crack)<br />
|
81 |
-
Download Netflix videos with HD Online Player (xmlbar video downloader vip crack)<br />
|
82 |
-
Download Hulu videos with HD Online Player (xmlbar video downloader vip crack)<br />
|
83 |
-
Download Amazon Prime videos with HD Online Player (xmlbar video downloader vip crack)<br />
|
84 |
-
Download Disney+ videos with HD Online Player (xmlbar video downloader vip crack)<br />
|
85 |
-
Download HBO Max videos with HD Online Player (xmlbar video downloader vip crack)<br />
|
86 |
-
Download Vimeo videos with HD Online Player (xmlbar video downloader vip crack)<br />
|
87 |
-
Download Dailymotion videos with HD Online Player (xmlbar video downloader vip crack)<br />
|
88 |
-
Download TikTok videos with HD Online Player (xmlbar video downloader vip crack)<br />
|
89 |
-
Download Instagram videos with HD Online Player (xmlbar video downloader vip crack)</p>
|
90 |
-
<h3>Benefits of vip crack</h3>
|
91 |
-
<ul>
|
92 |
-
<li>You can download videos from any website without any limitations or interruptions.</li>
|
93 |
-
<li>You can download videos in any format and resolution without any loss of quality or speed.</li>
|
94 |
-
<li>You can download multiple videos at once without any waiting time or queueing.</li>
|
95 |
-
<li>You can convert videos to different formats and resolutions without any extra software or tools.</li>
|
96 |
-
<li>You can record any part of the screen or the entire screen as a video or an image without any watermark or logo.</li>
|
97 |
-
</ul>
|
98 |
-
<h3>How to get vip crack</h3>
|
99 |
-
<ul>
|
100 |
-
<li>You can buy vip crack from its official website or from a trusted seller. The price may vary depending on the duration and type of subscription. You will receive an activation code after making the payment. You have to enter this code into HD Online Player or xmlbar video downloader to activate vip crack.</li>
|
101 |
-
<li>You can also get vip crack for free or at a lower price by participating in some promotions or giveaways. These may include surveys, quizzes, referrals, reviews, etc. You will receive an activation code after completing the required tasks. You have to enter this code into HD Online Player or xmlbar video downloader to activate vip crack.</li>
|
102 |
-
<li>You can also get vip crack for free or at a lower price by using some hacks or cheats. These may include cracks, patches, keygens, generators, etc. You have to download these files from some websites or sources. You have to run these files on your computer and follow some instructions. You will receive an activation code after running these files. You have to enter this code into HD Online Player or xmlbar video downloader to activate vip crack.</li>
|
103 |
-
</ul>
|
104 |
-
<h2>How to download videos with HD Online Player (xmlbar video downloader vip crack)</h2>
|
105 |
-
<p>If you have HD Online Player (xmlbar video downloader vip crack), you can download videos with ease. Here are the steps you need to follow:</p>
|
106 |
-
<h3>Step 1: Install HD Online Player and xmlbar video downloader</h3>
|
107 |
-
<p>You need to install both HD Online Player and xmlbar video downloader on your computer. You can download them from their official websites or from trusted sources. Make sure you have enough space on your hard drive for installation and downloading videos.</p>
|
108 |
-
<h3>Step 2: Open HD Online Player and find the video you want to download</h3>
|
109 |
-
<p>You need to open HD Online Player and click on the "Online" tab. You will see a built-in browser where you can search and browse online videos. You need to find the video you want to download from any website. You can use keywords or website addresses in the search bar. You can also use filters or categories to narrow down your search results. Once you find the video you want to download, click on it to start playing it.</p>
|
110 |
-
<h3>Step 3: Copy the video URL and paste it into xmlbar video downloader</h3>
|
111 |
-
<p>You need to copy the URL of the video you want to download from HD Online Player. You can do this by right-clicking on the screen and selecting "Copy URL". Then, paste it by clicking the "Add URL" button on the top left corner of xmlbar video downloader and entering it into the pop-up window. Alternatively, you can drag-and-drop the URL from HD Online Player into xmlbar video downloader.</p>
|
112 |
-
<h3>Step 4: Choose the output format and quality and click download</h3>
|
113 |
-
<p>You need to choose the output format and quality of the video you want to download from xmlbar video downloader. You can do this by selecting from the drop-down menu below the URL. You can choose from various formats, such as MP4, FLV, 3GP, WEBM, etc. You can also choose from various resolutions, such as 144p, 240p, 360p, 480p, 720p, 1080p, 2K, 4K, etc. You can also customize the output settings by clicking on the "Settings" button. Once you have chosen the output format and quality, click on the "Download" button to start downloading the video.</p>
|
114 |
-
<p>You can view the download progress, pause or resume downloads, delete downloads, and open downloaded files by using the buttons on the right side of each item in xmlbar video downloader. You can also preview downloaded videos within xmlbar video downloader by clicking on the "Play" button.</p>
|
115 |
-
<h2>Conclusion</h2>
|
116 |
-
<p>HD Online Player (xmlbar video downloader vip crack) is a powerful combination of a video player and a video downloader that can help you watch and download online videos in high definition. It has many features and functions that make it easy and convenient to use. It also has a vip crack that can unlock all the features and functions without any restrictions or ads. You can get vip crack by buying it, participating in promotions or giveaways, or using hacks or cheats. You can download videos with HD Online Player (xmlbar video downloader vip crack) by following four simple steps: install HD Online Player and xmlbar video downloader, open HD Online Player and find the video you want to download, copy the video URL and paste it into xmlbar video downloader, and choose the output format and quality and click download.</p>
|
117 |
-
<p>We hope this article has helped you understand what HD Online Player (xmlbar video downloader vip crack) is and how to use it to download videos. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
|
118 |
-
<h2>FAQs</h2>
|
119 |
-
<ul>
|
120 |
-
<li><b>Is HD Online Player (xmlbar video downloader vip crack) safe to use?</b></li>
|
121 |
-
<p>HD Online Player (xmlbar video downloader vip crack) is safe to use if you download it from its official website or from a trusted source. However, you should be careful when using hacks or cheats to get vip crack as they may contain viruses or malware that can harm your computer or steal your personal information.</p>
|
122 |
-
<li><b>Is HD Online Player (xmlbar video downloader vip crack) legal to use?</b></li>
|
123 |
-
<p>HD Online Player (xmlbar video downloader vip crack) is legal to use if you use it for personal and non-commercial purposes only. However, you should respect the intellectual property rights of the original creators and owners of the online videos you download. You should not download or distribute videos that are protected by copyright or other laws without their permission.</p>
|
124 |
-
<li><b>Can I use HD Online Player (xmlbar video downloader vip crack) on other devices?</b></li>
|
125 |
-
<p>HD Online Player (xmlbar video downloader vip crack) is currently available for Windows computers only. However, there are some similar apps or websites that can help you watch and download online videos on other devices, such as smartphones, tablets, Macs, etc. You can search for them online or ask for recommendations from other users.</p>
|
126 |
-
<li><b>Can I use HD Online Player (xmlbar video downloader vip crack) offline?</b></li>
|
127 |
-
<p>You can use HD Online Player (xmlbar video downloader vip crack) offline to watch downloaded videos or local videos on your computer. However, you need an internet connection to access online videos or download new videos with HD Online Player (xmlbar video downloader vip crack).</p>
|
128 |
-
<li><b>Can I share HD Online Player (xmlbar video downloader vip crack) with others?</b></li>
|
129 |
-
<p>You can share HD Online Player (xmlbar video downloader vip crack) with others by sending them the installation files or links. However, you should not share your activation code for vip crack with others as it may be invalid or blocked if used by multiple users.</p>
|
130 |
-
</ul>
|
131 |
-
</p> 0a6ba089eb<br />
|
132 |
-
<br />
|
133 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/A Great Big World When The Morning Comes Download Zip.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>265. consecrated life is often misunderstood, and much confusion reigns about what it really is. many people do not see how profoundly the example of consecrated life can benefit the world, especially when it comes to living faithfully in the world. no one lives out of the world as they do in the church. no one lives in the church without being perfectly incorporated in the world, and yet this integration is not a merely formal and superficial one. in consecrated life, the world and the church are knit together. in consecrated life, the world is a home, a place to work, a sanctuary of prayer, a home for the reception of god. as a result, consecrated persons are also responsible to the world. the world is a home, and the church is a home, and the whole christian community lives in the church as if it were a home. this is the way in which the church understands, and indeed must understand, its mission: to be a home in the world.</p>
|
3 |
-
<p>30. the presence of the saints in the church, and the saints in heaven, is not an impediment to the growth of christian life, but rather a great help to it. the saints are not a burden, but an example which we can follow. they show us the path of god in the world, and we can follow in their steps if we desire to do so. their example encourages us to overcome our difficulties, to continue in prayer, and to work for the good of all. the saints are a great source of joy, as we can see if we open ourselves to their friendship. but their closeness is always accompanied by intercession. we can ask them to pray for us in order that they may assist us in our struggles.</p>
|
4 |
-
<h2>a great big world when the morning comes download zip</h2><br /><p><b><b>DOWNLOAD</b> ↔ <a href="https://imgfil.com/2uy0kh">https://imgfil.com/2uy0kh</a></b></p><br /><br /> 899543212b<br />
|
5 |
-
<br />
|
6 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/ConvertXtoDVD V5.3.0.9 Installer -nelly- Serial Key.md
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<p>Lastly, you have the ability to convert directly to DVD or specify the best type of format for DVD. You can also select which audio track to add to the DVDs. In addition, the software can also combine media files. It has an automatic video stabilization option and a silent movie mode. Using this option will enable you to record in silence even if the sound is playing. Additionally, this program allows you to record while the previous clip is playing. VSO ConvertXtoDVD Download Free 1.1.9.2 is a flexible batch converter which runs on almost all popular media players. Additionally, it can be used as a standalone software. It has a powerful file format converter that converts among all media formats. Windows Media Player (WMV), Windows Media Format (WAV), Apple QuickTime (MOV), RealMedia (AAC/MP3), FLV and more video formats will be supported. Additionally, a powerful video editor is provided.</p>
|
3 |
-
<h2>ConvertXtoDVD V5.3.0.9 Installer -nelly- Serial Key</h2><br /><p><b><b>Download File</b> ⚹⚹⚹ <a href="https://imgfil.com/2uxYBZ">https://imgfil.com/2uxYBZ</a></b></p><br /><br />
|
4 |
-
<p>VSO ConvertXtoDVD Serial Key is a very useful software that can perform a batch conversion of many files at once and save time. Additionally, it allows you to convert media files or burn them to create DVD. Additionally, you can select between video or audio conversion. The software comes with many options and settings that will help you decide on the type of hard drive space required for the software and the system requirements.</p>
|
5 |
-
<p>VSO ConvertXtoDVD Serial Key is a very useful software that can perform a batch conversion of many files at once and save time. Additionally, it allows you to convert media files or burn them to create DVD. Additionally, you can select between video or audio conversion.</p> 899543212b<br />
|
6 |
-
<br />
|
7 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Download Auto Macro Recorder With Crack.md
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>Take your recording games to another level with Auto Macro Recorder. It will auto record mouse and keyboard actions automatically. You can record on-screen, pause the recording, stop the recording, replay the recording, and many more! You can record mouse clicks and keystrokes to easily automate some task.</p>
|
3 |
-
<p>Mouse Recorder Premium is a professional macro recorder to automate repetitive tasks and can be used for software test automation or as a mouse auto clicker. It records your mouse clicks and keystrokes for infinite playback. Edgy mouse movements can be streamlined and timing can be optimized, e.g. for smooth screencast video tutorials. Macro recordings are recorded like a tape recorder or created step-by-step with the powerful macro editor.</p>
|
4 |
-
<h2>Download Auto Macro Recorder With Crack</h2><br /><p><b><b>Download</b> ✪✪✪ <a href="https://imgfil.com/2uxY3J">https://imgfil.com/2uxY3J</a></b></p><br /><br />
|
5 |
-
<p>While it is possible to use software to make macro, not all keyboard and mouse are compatible with these software, and since these software are not developed by the hardware companies themselves, they will not be covered by hardware companies' support team, this is where programmable keyboard and mouse come in. With built-in programmablility, the software of these keyboard and mouse will be supported by the manufacturers, and some of these keyboard and mouse even come with added features in hardware levels (for example, macro keys).</p>
|
6 |
-
<p>From the name itself, we can easily say that this program is made to do repetitive actions just like what a mouse and keyboard recorder does. With Do It Again app, you can record any macros from your keyboard and mouse, and then play it again whenever you want to. The process is direct, all you need is to create new tasks and all your mouse and keyboard actions will be captured simultaneously. Just hit the Scroll Lock to stop the recording process.</p> 899543212b<br />
|
7 |
-
<br />
|
8 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Download Pes 2010 Torent Tpb 23.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>download pes 2010 torent tpb 23</h2><br /><p><b><b>Download File</b> ->>> <a href="https://imgfil.com/2uy1QN">https://imgfil.com/2uy1QN</a></b></p><br /><br />
|
2 |
-
|
3 |
-
Fifa 2012, Download liga 1 fifa 2012 torent tpb, fifa 2010 320240 landscape game free. Game fifa ... Choi fifa 07 online crack Mar 23, 2015. 1fdad05405<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/app.py
DELETED
@@ -1,18 +0,0 @@
|
|
1 |
-
"""Gradio demo that turns a persona (e.g. "photographer") into a ChatGPT prompt.

Uses a BART model fine-tuned on the awesome-chatgpt-prompts dataset.
"""

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# Load the fine-tuned checkpoint once at startup. ``from_tf=True`` converts
# the published TensorFlow weights to PyTorch on the fly.
tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompts-bart-long")
model = AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompts-bart-long", from_tf=True)


def generate(prompt):
    """Return a single generated ChatGPT prompt for the given persona string."""
    encoded = tokenizer(prompt, return_tensors="pt")
    generated = model.generate(encoded["input_ids"], max_new_tokens=150)
    decoded = tokenizer.batch_decode(generated, skip_special_tokens=True)
    return decoded[0]


# --- Gradio UI wiring -------------------------------------------------------
input_component = gr.Textbox(label="Input a persona, e.g. photographer", value="photographer")
output_component = gr.Textbox(label="Prompt")
examples = [["photographer"], ["developer"]]
description = "This app generates ChatGPT prompts, it's based on a BART model trained on [this dataset](https://huggingface.co/datasets/fka/awesome-chatgpt-prompts). 📓 Simply enter a persona that you want the prompt to be generated based on. 🧙🏻🧑🏻🚀🧑🏻🎨🧑🏻🔬🧑🏻💻🧑🏼🏫🧑🏽🌾"

gr.Interface(
    generate,
    inputs=input_component,
    outputs=output_component,
    examples=examples,
    title="👨🏻🎤 ChatGPT Prompt Generator 👨🏻🎤",
    description=description,
).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Gas Station Simulator Mod Apk and Experience the Real Life of a Gas Station Owner.md
DELETED
@@ -1,125 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Gas Station Simulator Mod Apk 2022</h1>
|
3 |
-
<p>Do you dream of owning and running your own gas station business? Do you want to experience the thrill and challenge of managing a busy service station? If yes, then you should try <strong>Gas Station Simulator</strong>, a fun and realistic simulation game that lets you become a gas station tycoon. And if you want to enjoy more features and benefits, you should download <strong>Gas Station Simulator Mod Apk 2022</strong>, a modified version of the game that gives you unlimited money, unlocked items, and more. In this article, we will tell you everything you need to know about this amazing game and how to download it on your Android device.</p>
|
4 |
-
<h2>download gas station simulator mod apk 2022</h2><br /><p><b><b>Download Zip</b> ••• <a href="https://urlin.us/2uSWgx">https://urlin.us/2uSWgx</a></b></p><br /><br />
|
5 |
-
<h2>What is Gas Station Simulator?</h2>
|
6 |
-
<p>Gas Station Simulator is a simulation game developed by <a href="(^1^)">METODEGAMES</a>, a popular game studio that specializes in creating realistic and immersive simulation games. In this game, you will start with a small and rundown gas station that you have to renovate and expand into a profitable business. You will have to take care of various aspects of your gas station, such as refueling cars, repairing vehicles, washing windows, selling snacks, hiring staff, upgrading facilities, and more <p>The game features realistic graphics, sound effects, and physics that make you feel like you are actually running a gas station. You will also encounter different types of customers, such as truck drivers, bikers, families, and more, each with their own preferences and personalities. You will have to satisfy their needs and earn their trust and loyalty. The game also has a day and night cycle, weather effects, and random events that add more variety and challenge to your gameplay.</p>
|
7 |
-
<h2>Why download Gas Station Simulator Mod Apk 2022?</h2>
|
8 |
-
<p>Gas Station Simulator is a free-to-play game that you can download from the Google Play Store. However, the game also has some in-app purchases and ads that may limit your enjoyment and progress. For example, you will need to spend real money to buy more coins, gems, and items that can help you upgrade your gas station faster. You will also have to watch ads to get some rewards or bonuses. If you want to avoid these hassles and have more fun and freedom in playing the game, you should download Gas Station Simulator Mod Apk 2022, a modified version of the game that gives you many advantages over the original version.</p>
|
9 |
-
<h3>Benefits of Gas Station Simulator Mod Apk 2022</h3>
|
10 |
-
<p>Some of the benefits of downloading Gas Station Simulator Mod Apk 2022 are:</p>
|
11 |
-
<ul>
|
12 |
-
<li>You will get unlimited money that you can use to buy anything you want in the game.</li>
|
13 |
-
<li>You will get unlimited gems that you can use to speed up your tasks and unlock more features.</li>
|
14 |
-
<li>You will get all the items in the game unlocked, such as cars, tools, decorations, and more.</li>
|
15 |
-
<li>You will get rid of all the ads that may interrupt your gameplay or consume your data.</li>
|
16 |
-
<li>You will get access to the latest version of the game with all the new updates and bug fixes.</li>
|
17 |
-
</ul>
|
18 |
-
<h3>Drawbacks of Gas Station Simulator Mod Apk 2022</h3>
|
19 |
-
<p>Some of the drawbacks of downloading Gas Station Simulator Mod Apk 2022 are:</p>
|
20 |
-
<ul>
|
21 |
-
<li>You may face some compatibility issues with your device or operating system.</li>
|
22 |
-
<li>You may encounter some errors or glitches that may affect your gameplay or performance.</li>
|
23 |
-
<li>You may risk losing your progress or data if you uninstall or update the game.</li>
|
24 |
-
<li>You may violate the terms and conditions of the game developer or publisher.</li>
|
25 |
-
<li>You may expose your device or data to malware or viruses from unknown sources.</li>
|
26 |
-
</ul> <h2>How to download Gas Station Simulator Mod Apk 2022?</h2>
|
27 |
-
<p>If you are convinced that downloading Gas Station Simulator Mod Apk 2022 is worth it, you may be wondering how to do it. Well, don't worry, because we have prepared a simple and easy tutorial for you. Just follow these steps and you will be able to enjoy the modded version of the game in no time.</p>
|
28 |
-
<p>download gas station simulator mod apk 2022 unlimited money<br />
|
29 |
-
download gas station simulator mod apk 2022 latest version<br />
|
30 |
-
download gas station simulator mod apk 2022 for android<br />
|
31 |
-
download gas station simulator mod apk 2022 free<br />
|
32 |
-
download gas station simulator mod apk 2022 hack<br />
|
33 |
-
download gas station simulator mod apk 2022 offline<br />
|
34 |
-
download gas station simulator mod apk 2022 no ads<br />
|
35 |
-
download gas station simulator mod apk 2022 full unlocked<br />
|
36 |
-
download gas station simulator mod apk 2022 premium<br />
|
37 |
-
download gas station simulator mod apk 2022 pro<br />
|
38 |
-
download gas station simulator mod apk 2022 mega mod<br />
|
39 |
-
download gas station simulator mod apk 2022 update<br />
|
40 |
-
download gas station simulator mod apk 2022 new features<br />
|
41 |
-
download gas station simulator mod apk 2022 cheats<br />
|
42 |
-
download gas station simulator mod apk 2022 gameplay<br />
|
43 |
-
download gas station simulator mod apk 2022 review<br />
|
44 |
-
download gas station simulator mod apk 2022 tips and tricks<br />
|
45 |
-
download gas station simulator mod apk 2022 best settings<br />
|
46 |
-
download gas station simulator mod apk 2022 guide<br />
|
47 |
-
download gas station simulator mod apk 2022 tutorial<br />
|
48 |
-
download gas station simulator mod apk 2022 how to play<br />
|
49 |
-
download gas station simulator mod apk 2022 how to install<br />
|
50 |
-
download gas station simulator mod apk 2022 how to download<br />
|
51 |
-
download gas station simulator mod apk 2022 how to get unlimited money<br />
|
52 |
-
download gas station simulator mod apk 2022 how to unlock everything<br />
|
53 |
-
download gas station simulator mod apk 2022 how to hack<br />
|
54 |
-
download gas station simulator mod apk 2022 how to run offline<br />
|
55 |
-
download gas station simulator mod apk 2022 how to remove ads<br />
|
56 |
-
download gas station simulator mod apk 2022 how to upgrade<br />
|
57 |
-
download gas station simulator mod apk 2022 how to customize<br />
|
58 |
-
download gas station simulator mod apk 2022 how to manage your business<br />
|
59 |
-
download gas station simulator mod apk 2022 how to attract customers<br />
|
60 |
-
download gas station simulator mod apk 2022 how to earn more money<br />
|
61 |
-
download gas station simulator mod apk 2022 how to expand your business<br />
|
62 |
-
download gas station simulator mod apk 2022 how to compete with other players<br />
|
63 |
-
download gas station simulator mod apk 2022 how to enjoy the game<br />
|
64 |
-
download gas station simulator mod apk 2022 benefits and drawbacks<br />
|
65 |
-
download gas station simulator mod apk 2022 pros and cons<br />
|
66 |
-
download gas station simulator mod apk 2022 advantages and disadvantages<br />
|
67 |
-
download gas station simulator mod apk 2022 comparison and contrast<br />
|
68 |
-
download gas station simulator mod apk 2022 alternatives and substitutes<br />
|
69 |
-
download gas station simulator mod apk 2022 similar and related games<br />
|
70 |
-
download gas station simulator mod apk 2022 recommendations and suggestions<br />
|
71 |
-
download gas station simulator mod apk 2022 feedback and ratings<br />
|
72 |
-
download gas station simulator mod apk 2022 comments and reviews<br />
|
73 |
-
download gas station simulator mod apk 2022 questions and answers<br />
|
74 |
-
download gas station simulator mod apk 2022 problems and solutions<br />
|
75 |
-
download gas station simulator mod apk 2022 issues and fixes</p>
|
76 |
-
<h3>Requirements for downloading Gas Station Simulator Mod Apk 2022</h3>
|
77 |
-
<p>Before you start downloading the modded version of the game, you need to make sure that you have the following requirements:</p>
|
78 |
-
<ul>
|
79 |
-
<li>An Android device that runs on Android 4.4 or higher.</li>
|
80 |
-
<li>At least 100 MB of free storage space on your device.</li>
|
81 |
-
<li>A stable internet connection.</li>
|
82 |
-
<li>A file manager app that can extract zip files.</li>
|
83 |
-
<li>A permission to install apps from unknown sources. You can enable this by going to your device settings, then security, then unknown sources.</li>
|
84 |
-
</ul>
|
85 |
-
<h3>Steps for downloading Gas Station Simulator Mod Apk 2022</h3>
|
86 |
-
<p>Once you have the requirements ready, you can proceed with the following steps:</p>
|
87 |
-
<ol>
|
88 |
-
<li>Click on this link <a href="">Gas Station Simulator Mod Apk 2022</a> to download the modded version of the game. The file size is about 90 MB and it is in zip format.</li>
|
89 |
-
<li>Wait for the download to finish and then locate the zip file on your device using your file manager app.</li>
|
90 |
-
<li>Extract the zip file and you will see two files: one is the apk file and the other is the obb file.</li>
|
91 |
-
<li>Tap on the apk file and install it on your device. Do not open it yet.</li>
|
92 |
-
<li>Copy the obb file and paste it into this folder: Android/obb/com.metodegames.gasstationsimulator. If you don't have this folder, create it manually.</li>
|
93 |
-
<li>Now you can open the game and enjoy playing Gas Station Simulator Mod Apk 2022 with unlimited money, gems, and items.</li>
|
94 |
-
</ol>
|
95 |
-
<h2>Tips and tricks for playing Gas Station Simulator Mod Apk 2022</h2>
|
96 |
-
<p>Now that you have downloaded and installed Gas Station Simulator Mod Apk 2022, you may want to know some tips and tricks that can help you play the game better. Here are some of them:</p>
|
97 |
-
<h3>How to upgrade your gas station?</h3>
|
98 |
-
<p>One of the main goals of the game is to upgrade your gas station and make it more attractive and efficient. You can do this by using your money and gems to buy new items and facilities, such as pumps, tanks, car washes, shops, restrooms, parking lots, and more. You can also customize your gas station with different colors, decorations, signs, and logos. Upgrading your gas station will increase your income and reputation, as well as unlock new features and challenges.</p>
|
99 |
-
<h3>How to attract more customers?</h3>
|
100 |
-
<p>Another important aspect of the game is to attract more customers and keep them happy. You can do this by providing them with fast and quality service, such as refueling their cars, repairing their vehicles, washing their windows, selling them snacks, and more. You can also offer them discounts, coupons, loyalty cards, and freebies to make them come back again. You can also advertise your gas station on social media, radio, TV, or billboards to reach more potential customers. Attracting more customers will increase your revenue and rating, as well as unlock new types of customers and vehicles.</p>
|
101 |
-
<h3>How to manage your finances?</h3>
|
102 |
-
<p>The last but not least aspect of the game is to manage your finances and balance your income and expenses. You can do this by keeping track of your daily earnings and spending, as well as your taxes and loans. You can also invest your money in stocks, bonds, or cryptocurrencies to earn more profits or dividends. You can also save your money in a bank account or a safe to protect it from theft or loss. Managing your finances will help you grow your business and avoid bankruptcy or debt.</p>
|
103 |
-
<h2>Conclusion</h2>
|
104 |
-
<p>In conclusion, Gas Station Simulator is a fun and realistic simulation game that lets you become a gas station tycoon. You can download Gas Station Simulator Mod Apk 2022 to enjoy more features and benefits than the original version of the game. However, you should also be aware of the drawbacks and risks of downloading the modded version. You should also follow our tips and tricks to play the game better and achieve your goals faster. We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to contact us through the comment section below. Thank you for reading and happy gaming!</p>
|
105 |
-
<h3>FAQs</h3>
|
106 |
-
<p>Here are some frequently asked questions and answers about Gas Station Simulator Mod Apk 2022:</p>
|
107 |
-
<ul>
|
108 |
-
<li><strong>Q: Is Gas Station Simulator Mod Apk 2022 safe to download and use?</strong></li>
|
109 |
-
<li>A: Yes, Gas Station Simulator Mod Apk 2022 is safe to download and use, as long as you download it from a trusted and reliable source. However, you should always scan the file with an antivirus or malware detector before installing it on your device.</li>
|
110 |
-
<li><strong>Q: Can I play Gas Station Simulator Mod Apk 2022 online or offline?</strong></li>
|
111 |
-
<li>A: You can play Gas Station Simulator Mod Apk 2022 both online and offline. However, you will need an internet connection to access some features and updates of the game.</li>
|
112 |
-
<li><strong>Q: Can I play Gas Station Simulator Mod Apk 2022 with my friends or other players?</strong></li>
|
113 |
-
<li>A: Yes, you can play Gas Station Simulator Mod Apk 2022 with your friends or other players. You can join or create a multiplayer mode where you can compete or cooperate with other gas station owners. You can also chat and interact with them through the game's social network.</li>
|
114 |
-
<li><strong>Q: How can I update Gas Station Simulator Mod Apk 2022 to the latest version?</strong></li>
|
115 |
-
<li>A: You can update Gas Station Simulator Mod Apk 2022 to the latest version by downloading and installing the new version from the same source where you downloaded the previous version. You can also check for updates within the game's settings menu.</li>
|
116 |
-
<li><strong>Q: How can I uninstall Gas Station Simulator Mod Apk 2022 from my device?</strong></li>
|
117 |
-
<li>A: You can uninstall Gas Station Simulator Mod Apk 2022 from your device by following these steps:</li>
|
118 |
-
<ol>
|
119 |
-
<li>Go to your device settings, then apps, then Gas Station Simulator.</li>
|
120 |
-
<li>Tap on uninstall and confirm your action.</li>
|
121 |
-
<li>Delete the obb file from this folder: Android/obb/com.metodegames.gasstationsimulator.</li>
|
122 |
-
</ol>
|
123 |
-
</ul></p> 197e85843d<br />
|
124 |
-
<br />
|
125 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Enjoy Love Convention Mod APK with Unlimited Money and Gems.md
DELETED
@@ -1,98 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Love Convention Mod APK: A Free Adventure Game for Android Lovers</h1>
|
3 |
-
<p>If you are looking for a fun and exciting adventure game for your Android device, you might want to check out <strong>Love Convention Mod APK</strong>. This is a free software that makes part of the category 'Adventure' and lets you experience a romantic and thrilling story with different characters and choices. In this article, we will tell you everything you need to know about Love Convention, how to download and install the mod apk version, why you should play it, and some useful tips and tricks to enjoy it more.</p>
|
4 |
-
<h2>What is Love Convention?</h2>
|
5 |
-
<h3>A brief introduction to the game and its features</h3>
|
6 |
-
<p>Love Convention is a game developed by RAITgames, a studio that specializes in creating interactive stories with high-quality graphics and sound effects. The game was published on Softonic on October 4th, 2022 and has received positive reviews from users. The game is set in a fictional convention where you can meet different characters, such as celebrities, cosplayers, fans, and more. You can choose your own avatar, name, gender, and preferences, and interact with the characters in various ways. You can also customize your outfits, accessories, and hairstyles to suit your style. The game has multiple endings depending on your choices and actions, so you can replay it as many times as you want.</p>
|
7 |
-
<h2>love convention mod apk</h2><br /><p><b><b>Download</b> ✯ <a href="https://jinyurl.com/2uNKnT">https://jinyurl.com/2uNKnT</a></b></p><br /><br />
|
8 |
-
<h3>How to download and install the mod apk version</h3>
|
9 |
-
<p>The mod apk version of Love Convention is a modified version that gives you access to some extra features that are not available in the original one. For example, you can get unlimited money, gems, tickets, and energy, which you can use to buy items, unlock scenes, and progress faster in the game. You can also remove ads and enjoy a smoother gameplay. To download and install the mod apk version of Love Convention, you need to follow these steps:</p>
|
10 |
-
<ol>
|
11 |
-
<li>Go to <a href="(^2^)">this link</a> and download the apk file of Love Convention Mod APK.</li>
|
12 |
-
<li>Go to your device settings and enable the installation of apps from unknown sources.</li>
|
13 |
-
<li>Locate the downloaded apk file in your file manager and tap on it to install it.</li>
|
14 |
-
<li>Launch the game and enjoy!</li>
|
15 |
-
</ol>
|
16 |
-
<h2>Why should you play Love Convention Mod APK?</h2>
|
17 |
-
<h3>The benefits of playing the mod apk version over the original one</h3>
|
18 |
-
<p>As we mentioned before, playing the mod apk version of Love Convention gives you some advantages over playing the original one. Here are some of them:</p>
|
19 |
-
<ul>
|
20 |
-
<li>You can save money and time by getting unlimited resources that you can use to enhance your gaming experience.</li>
|
21 |
-
<li>You can explore more options and outcomes by unlocking all the scenes and endings that are otherwise restricted or limited.</li>
|
22 |
-
<li>You can avoid annoying ads that interrupt your gameplay and distract you from the story.</li>
|
23 |
-
<li>You can enjoy a faster and smoother gameplay without any glitches or bugs.</li>
|
24 |
-
</ul>
|
25 |
-
<h3>The challenges and rewards of the game</h3>
|
26 |
-
<p>Playing Love Convention is not only fun but also challenging. You will face different situations and dilemmas that will test your skills, judgment, and emotions. You will have to make decisions that will affect your relationships with the characters and the outcome of the story. You will also have to deal with some conflicts, secrets, mysteries, and surprises that will keep you on your toes. However, playing Love Convention is also rewarding. You will get to experience a captivating and immersive story that will make you feel like you are part of it. You will also get to meet and romance different characters that have their own personalities, backgrounds, and goals. You will also get to enjoy stunning graphics, animations, and sound effects that will enhance your gaming experience. Playing Love Convention is a great way to escape from reality and have some fun.</p>
|
27 |
-
<h3>The best tips and tricks to enjoy the game</h3>
|
28 |
-
<p>If you want to make the most out of playing Love Convention, here are some tips and tricks that you can use:</p>
|
29 |
-
<ul>
|
30 |
-
<li>Pay attention to the dialogues and choices that you make, as they will affect your relationships with the characters and the outcome of the story.</li>
|
31 |
-
<li>Use your resources wisely, as they are limited in the original version and can run out quickly. You can use them to buy items, unlock scenes, and progress faster in the game.</li>
|
32 |
-
<li>Explore different paths and endings by replaying the game with different choices and actions. You might discover new things and secrets that you missed before.</li>
|
33 |
-
<li>Save your progress frequently, as the game does not have an auto-save feature. You can save your progress by tapping on the menu icon on the top right corner of the screen and selecting "Save".</li>
|
34 |
-
<li>Have fun and enjoy the game!</li>
|
35 |
-
</ul>
|
36 |
-
<h2>Conclusion</h2>
|
37 |
-
<p>Love Convention Mod APK is a free adventure game for Android lovers that lets you experience a romantic and thrilling story with different characters and choices. You can download and install the mod apk version of Love Convention to get access to some extra features that are not available in the original one, such as unlimited money, gems, tickets, and energy, as well as removing ads and enjoying a smoother gameplay. You can also enjoy the benefits, challenges, and rewards of playing Love Convention, as well as some useful tips and tricks to enjoy it more. If you are looking for a fun and exciting adventure game for your Android device, you should definitely give Love Convention Mod APK a try!</p>
|
38 |
-
<h2>FAQs</h2>
|
39 |
-
<h3>What are the requirements to play Love Convention Mod APK?</h3>
|
40 |
-
<p>To play Love Convention Mod APK, you need to have an Android device that runs on Android 4.4 or higher, as well as a stable internet connection. You also need to have enough storage space on your device to download and install the apk file of Love Convention Mod APK.</p>
|
41 |
-
<h3>Is Love Convention Mod APK safe and legal?</h3>
|
42 |
-
<p>Yes, Love Convention Mod APK is safe and legal to use. The apk file of Love Convention Mod APK is scanned for viruses and malware before being uploaded on our website. However, we recommend that you download and install the apk file from our website only, as we cannot guarantee the safety and legality of other sources.</p>
|
43 |
-
<h3>How can I update Love Convention Mod APK?</h3>
|
44 |
-
<p>To update Love Convention Mod APK, you need to visit our website regularly and check for any new versions of the apk file. If there is a new version available, you can download and install it following the same steps as before. However, you might need to uninstall the previous version of Love Convention Mod APK before installing the new one.</p>
|
45 |
-
<p>love convention mod apk download<br />
|
46 |
-
love convention mod apk free<br />
|
47 |
-
love convention mod apk latest version<br />
|
48 |
-
love convention mod apk unlimited money<br />
|
49 |
-
love convention mod apk android<br />
|
50 |
-
love convention mod apk obb<br />
|
51 |
-
love convention mod apk offline<br />
|
52 |
-
love convention mod apk 2023<br />
|
53 |
-
love convention mod apk softonic<br />
|
54 |
-
love convention mod apk apkcombo<br />
|
55 |
-
love convention mod apk adventure game<br />
|
56 |
-
love convention mod apk raitgames<br />
|
57 |
-
love convention mod apk english<br />
|
58 |
-
love convention mod apk german<br />
|
59 |
-
love convention mod apk russian<br />
|
60 |
-
love convention mod apk portuguese<br />
|
61 |
-
love convention mod apk romance<br />
|
62 |
-
love convention mod apk simulation<br />
|
63 |
-
love convention mod apk choices<br />
|
64 |
-
love convention mod apk stories<br />
|
65 |
-
love convention mod apk characters<br />
|
66 |
-
love convention mod apk graphics<br />
|
67 |
-
love convention mod apk gameplay<br />
|
68 |
-
love convention mod apk review<br />
|
69 |
-
love convention mod apk rating<br />
|
70 |
-
love convention mod apk update<br />
|
71 |
-
love convention mod apk bug fix<br />
|
72 |
-
love convention mod apk features<br />
|
73 |
-
love convention mod apk cheats<br />
|
74 |
-
love convention mod apk hack<br />
|
75 |
-
love convention mod apk tips<br />
|
76 |
-
love convention mod apk guide<br />
|
77 |
-
love convention mod apk walkthrough<br />
|
78 |
-
love convention mod apk trailer<br />
|
79 |
-
love convention mod apk video<br />
|
80 |
-
love convention mod apk screenshot<br />
|
81 |
-
love convention mod apk install<br />
|
82 |
-
love convention mod apk safe<br />
|
83 |
-
love convention mod apk virus free<br />
|
84 |
-
love convention mod apk legal<br />
|
85 |
-
love convention mod apk compatible devices<br />
|
86 |
-
love convention mod apk system requirements<br />
|
87 |
-
love convention mod apk size<br />
|
88 |
-
love convention mod apk file type<br />
|
89 |
-
love convention mod apk online support<br />
|
90 |
-
love convention mod apk feedback<br />
|
91 |
-
love convention mod apk forum<br />
|
92 |
-
love convention mod apk community</p>
|
93 |
-
<h3>Can I play Love Convention Mod APK offline?</h3>
|
94 |
-
<p>No, you cannot play Love Convention Mod APK offline. You need to have a stable internet connection to play Love Convention Mod APK, as the game requires online verification and data synchronization.</p>
|
95 |
-
<h3>How can I contact the developers of Love Convention?</h3>
|
96 |
-
<p>If you have any questions, feedback, or suggestions regarding Love Convention or Love Convention Mod APK, you can contact the developers of Love Convention by sending them an email at [email protected] or by visiting their official website at https://www.raitgames.com/.</p> 401be4b1e0<br />
|
97 |
-
<br />
|
98 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/9752isme/ChatGPT4/README.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Chat-with-GPT4
|
3 |
-
emoji: 🚀
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.21.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
duplicated_from: ysharma/ChatGPT4
|
12 |
-
---
|
13 |
-
|
14 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ADOPLE/Adopleai-DocumentQA/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: DocumentQA
|
3 |
-
emoji: 🏃
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.35.2
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ADobrovsky/Plant_Disease_Classification_Project/app.py
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import torch
|
3 |
-
import torch.nn
|
4 |
-
from torch import Tensor
|
5 |
-
import torch.nn.functional
|
6 |
-
import torchvision
|
7 |
-
from torchvision import transforms
|
8 |
-
|
9 |
-
MODEL_NAME = 'ResNeXt-101-64x4d'
|
10 |
-
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
11 |
-
MEAN = [0.485, 0.456, 0.406]
|
12 |
-
STD = [0.229, 0.224, 0.225]
|
13 |
-
|
14 |
-
from torchvision.models import resnext101_64x4d
|
15 |
-
model = resnext101_64x4d()
|
16 |
-
model.fc = torch.nn.Linear(model.fc.in_features, 88)
|
17 |
-
|
18 |
-
if (torch.cuda.is_available()):
|
19 |
-
model.load_state_dict(torch.load(MODEL_NAME+'-model-1.pt'))
|
20 |
-
else:
|
21 |
-
model.load_state_dict(torch.load(MODEL_NAME+'-model-1.pt', map_location=torch.device('cpu')))
|
22 |
-
|
23 |
-
model = model.to(DEVICE)
|
24 |
-
|
25 |
-
labels = ['Apple__black_rot', 'Apple__healthy', 'Apple__rust', 'Apple__scab', 'Cassava__bacterial_blight', 'Cassava__brown_streak_disease', 'Cassava__green_mottle', 'Cassava__healthy', 'Cassava__mosaic_disease', 'Cherry__healthy', 'Cherry__powdery_mildew', 'Chili__healthy', 'Chili__leaf curl', 'Chili__leaf spot', 'Chili__whitefly', 'Chili__yellowish', 'Coffee__cercospora_leaf_spot', 'Coffee__healthy', 'Coffee__red_spider_mite', 'Coffee__rust', 'Corn__common_rust', 'Corn__gray_leaf_spot', 'Corn__healthy', 'Corn__northern_leaf_blight', 'Cucumber__diseased', 'Cucumber__healthy', 'Gauva__diseased', 'Gauva__healthy', 'Grape__black_measles', 'Grape__black_rot', 'Grape__healthy', 'Grape__leaf_blight_(isariopsis_leaf_spot)', 'Jamun__diseased', 'Jamun__healthy', 'Lemon__diseased', 'Lemon__healthy', 'Mango__diseased', 'Mango__healthy', 'Peach__bacterial_spot', 'Peach__healthy', 'Pepper_bell__bacterial_spot', 'Pepper_bell__healthy', 'Pomegranate__diseased', 'Pomegranate__healthy', 'Potato__early_blight', 'Potato__healthy', 'Potato__late_blight', 'Rice__brown_spot', 'Rice__healthy', 'Rice__hispa', 'Rice__leaf_blast', 'Rice__neck_blast', 'Soybean__bacterial_blight', 'Soybean__caterpillar', 'Soybean__diabrotica_speciosa', 'Soybean__downy_mildew', 'Soybean__healthy', 'Soybean__mosaic_virus', 'Soybean__powdery_mildew', 'Soybean__rust', 'Soybean__southern_blight', 'Strawberry___leaf_scorch', 'Strawberry__healthy', 'Sugarcane__bacterial_blight', 'Sugarcane__healthy', 'Sugarcane__red_rot', 'Sugarcane__red_stripe', 'Sugarcane__rust', 'Tea__algal_leaf', 'Tea__anthracnose', 'Tea__bird_eye_spot', 'Tea__brown_blight', 'Tea__healthy', 'Tea__red_leaf_spot', 'Tomato__bacterial_spot', 'Tomato__early_blight', 'Tomato__healthy', 'Tomato__late_blight', 'Tomato__leaf_mold', 'Tomato__mosaic_virus', 'Tomato__septoria_leaf_spot', 'Tomato__spider_mites_(two_spotted_spider_mite)', 'Tomato__target_spot', 'Tomato__yellow_leaf_curl_virus', 'Wheat__brown_rust', 'Wheat__healthy', 'Wheat__septoria', 
'Wheat__yellow_rust']
|
26 |
-
|
27 |
-
predictTransform = transforms.Compose([
|
28 |
-
transforms.ToTensor(),
|
29 |
-
transforms.Normalize(mean=MEAN, std=STD)
|
30 |
-
])
|
31 |
-
|
32 |
-
def predict(img):
|
33 |
-
img = predictTransform(img).unsqueeze(0).to(DEVICE)
|
34 |
-
with torch.no_grad():
|
35 |
-
model.eval()
|
36 |
-
prediction = torch.nn.functional.softmax(model(img)[0], dim=0)
|
37 |
-
confidences = {labels[i]: float(prediction[i]) for i in range(len(labels))}
|
38 |
-
return confidences
|
39 |
-
|
40 |
-
title = "Plant Disease Classifier"
|
41 |
-
description = "Please upload a photo containing a plant leaf."
|
42 |
-
iface = gr.Interface(predict,
|
43 |
-
inputs=gr.Image(shape=(224, 224)),
|
44 |
-
outputs=gr.Label(num_top_classes=7),
|
45 |
-
live=True,
|
46 |
-
title=title,
|
47 |
-
description=description,
|
48 |
-
interpretation='default').launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIConsultant/MusicGen/audiocraft/grids/diffusion/4_bands_base_32khz.py
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
"""
|
8 |
-
Training of the 4 diffusion models described in
|
9 |
-
"From Discrete Tokens to High-Fidelity Audio Using Multi-Band Diffusion"
|
10 |
-
(paper link).
|
11 |
-
"""
|
12 |
-
|
13 |
-
from ._explorers import DiffusionExplorer
|
14 |
-
|
15 |
-
|
16 |
-
@DiffusionExplorer
|
17 |
-
def explorer(launcher):
|
18 |
-
launcher.slurm_(gpus=4, partition='learnfair')
|
19 |
-
|
20 |
-
launcher.bind_({'solver': 'diffusion/default',
|
21 |
-
'dset': 'internal/music_10k_32khz'})
|
22 |
-
|
23 |
-
with launcher.job_array():
|
24 |
-
launcher({'filter.use': True, 'filter.idx_band': 0, "processor.use": False, 'processor.power_std': 0.4})
|
25 |
-
launcher({'filter.use': True, 'filter.idx_band': 1, "processor.use": False, 'processor.power_std': 0.4})
|
26 |
-
launcher({'filter.use': True, 'filter.idx_band': 2, "processor.use": True, 'processor.power_std': 0.4})
|
27 |
-
launcher({'filter.use': True, 'filter.idx_band': 3, "processor.use": True, 'processor.power_std': 0.75})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/sampler.py
DELETED
@@ -1,102 +0,0 @@
|
|
1 |
-
"""Samplers, conforming to the glTF 2.0 standards as specified in
|
2 |
-
https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-sampler
|
3 |
-
|
4 |
-
Author: Matthew Matl
|
5 |
-
"""
|
6 |
-
from .constants import GLTF
|
7 |
-
|
8 |
-
|
9 |
-
class Sampler(object):
|
10 |
-
"""Texture sampler properties for filtering and wrapping modes.
|
11 |
-
|
12 |
-
Parameters
|
13 |
-
----------
|
14 |
-
name : str, optional
|
15 |
-
The user-defined name of this object.
|
16 |
-
magFilter : int, optional
|
17 |
-
Magnification filter. Valid values:
|
18 |
-
- :attr:`.GLTF.NEAREST`
|
19 |
-
- :attr:`.GLTF.LINEAR`
|
20 |
-
minFilter : int, optional
|
21 |
-
Minification filter. Valid values:
|
22 |
-
- :attr:`.GLTF.NEAREST`
|
23 |
-
- :attr:`.GLTF.LINEAR`
|
24 |
-
- :attr:`.GLTF.NEAREST_MIPMAP_NEAREST`
|
25 |
-
- :attr:`.GLTF.LINEAR_MIPMAP_NEAREST`
|
26 |
-
- :attr:`.GLTF.NEAREST_MIPMAP_LINEAR`
|
27 |
-
- :attr:`.GLTF.LINEAR_MIPMAP_LINEAR`
|
28 |
-
wrapS : int, optional
|
29 |
-
S (U) wrapping mode. Valid values:
|
30 |
-
- :attr:`.GLTF.CLAMP_TO_EDGE`
|
31 |
-
- :attr:`.GLTF.MIRRORED_REPEAT`
|
32 |
-
- :attr:`.GLTF.REPEAT`
|
33 |
-
wrapT : int, optional
|
34 |
-
T (V) wrapping mode. Valid values:
|
35 |
-
- :attr:`.GLTF.CLAMP_TO_EDGE`
|
36 |
-
- :attr:`.GLTF.MIRRORED_REPEAT`
|
37 |
-
- :attr:`.GLTF.REPEAT`
|
38 |
-
"""
|
39 |
-
|
40 |
-
def __init__(self,
|
41 |
-
name=None,
|
42 |
-
magFilter=None,
|
43 |
-
minFilter=None,
|
44 |
-
wrapS=GLTF.REPEAT,
|
45 |
-
wrapT=GLTF.REPEAT):
|
46 |
-
self.name = name
|
47 |
-
self.magFilter = magFilter
|
48 |
-
self.minFilter = minFilter
|
49 |
-
self.wrapS = wrapS
|
50 |
-
self.wrapT = wrapT
|
51 |
-
|
52 |
-
@property
|
53 |
-
def name(self):
|
54 |
-
"""str : The user-defined name of this object.
|
55 |
-
"""
|
56 |
-
return self._name
|
57 |
-
|
58 |
-
@name.setter
|
59 |
-
def name(self, value):
|
60 |
-
if value is not None:
|
61 |
-
value = str(value)
|
62 |
-
self._name = value
|
63 |
-
|
64 |
-
@property
|
65 |
-
def magFilter(self):
|
66 |
-
"""int : Magnification filter type.
|
67 |
-
"""
|
68 |
-
return self._magFilter
|
69 |
-
|
70 |
-
@magFilter.setter
|
71 |
-
def magFilter(self, value):
|
72 |
-
self._magFilter = value
|
73 |
-
|
74 |
-
@property
|
75 |
-
def minFilter(self):
|
76 |
-
"""int : Minification filter type.
|
77 |
-
"""
|
78 |
-
return self._minFilter
|
79 |
-
|
80 |
-
@minFilter.setter
|
81 |
-
def minFilter(self, value):
|
82 |
-
self._minFilter = value
|
83 |
-
|
84 |
-
@property
|
85 |
-
def wrapS(self):
|
86 |
-
"""int : S (U) wrapping mode.
|
87 |
-
"""
|
88 |
-
return self._wrapS
|
89 |
-
|
90 |
-
@wrapS.setter
|
91 |
-
def wrapS(self, value):
|
92 |
-
self._wrapS = value
|
93 |
-
|
94 |
-
@property
|
95 |
-
def wrapT(self):
|
96 |
-
"""int : T (V) wrapping mode.
|
97 |
-
"""
|
98 |
-
return self._wrapT
|
99 |
-
|
100 |
-
@wrapT.setter
|
101 |
-
def wrapT(self, value):
|
102 |
-
self._wrapT = value
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/CLAP/CLAPWrapper.py
DELETED
@@ -1,257 +0,0 @@
|
|
1 |
-
import random
|
2 |
-
import torchaudio
|
3 |
-
from torch._six import string_classes
|
4 |
-
import collections
|
5 |
-
import re
|
6 |
-
import torch.nn.functional as F
|
7 |
-
import numpy as np
|
8 |
-
from transformers import AutoTokenizer
|
9 |
-
from ldm.modules.encoders.CLAP.utils import read_config_as_args
|
10 |
-
from ldm.modules.encoders.CLAP.clap import CLAP
|
11 |
-
import math
|
12 |
-
import torchaudio.transforms as T
|
13 |
-
import os
|
14 |
-
import torch
|
15 |
-
from importlib_resources import files
|
16 |
-
|
17 |
-
|
18 |
-
class CLAPWrapper():
|
19 |
-
"""
|
20 |
-
A class for interfacing CLAP model.
|
21 |
-
"""
|
22 |
-
|
23 |
-
def __init__(self, model_fp, device):
|
24 |
-
self.np_str_obj_array_pattern = re.compile(r'[SaUO]')
|
25 |
-
self.file_path = os.path.realpath(__file__)
|
26 |
-
self.default_collate_err_msg_format = (
|
27 |
-
"default_collate: batch must contain tensors, numpy arrays, numbers, "
|
28 |
-
"dicts or lists; found {}")
|
29 |
-
self.config_as_str = files('ldm').joinpath('modules/encoders/CLAP/config.yml').read_text()
|
30 |
-
self.model_fp = model_fp
|
31 |
-
self.device = device
|
32 |
-
self.clap, self.tokenizer, self.args = self.load_clap()
|
33 |
-
|
34 |
-
def load_clap(self):
|
35 |
-
r"""Load CLAP model with args from config file"""
|
36 |
-
|
37 |
-
args = read_config_as_args(self.config_as_str, is_config_str=True)
|
38 |
-
|
39 |
-
if 'bert' in args.text_model:
|
40 |
-
self.token_keys = ['input_ids', 'token_type_ids', 'attention_mask']
|
41 |
-
else:
|
42 |
-
self.token_keys = ['input_ids', 'attention_mask']
|
43 |
-
|
44 |
-
clap = CLAP(
|
45 |
-
audioenc_name=args.audioenc_name,
|
46 |
-
sample_rate=args.sampling_rate,
|
47 |
-
window_size=args.window_size,
|
48 |
-
hop_size=args.hop_size,
|
49 |
-
mel_bins=args.mel_bins,
|
50 |
-
fmin=args.fmin,
|
51 |
-
fmax=args.fmax,
|
52 |
-
classes_num=args.num_classes,
|
53 |
-
out_emb=args.out_emb,
|
54 |
-
text_model=args.text_model,
|
55 |
-
transformer_embed_dim=args.transformer_embed_dim,
|
56 |
-
d_proj=args.d_proj
|
57 |
-
)
|
58 |
-
|
59 |
-
# Load pretrained weights for model
|
60 |
-
model_state_dict = torch.load(self.model_fp, map_location=torch.device('cpu'))['model']
|
61 |
-
clap.load_state_dict(model_state_dict)
|
62 |
-
|
63 |
-
clap.eval() # set clap in eval mode
|
64 |
-
tokenizer = AutoTokenizer.from_pretrained(args.text_model)
|
65 |
-
|
66 |
-
clap = clap.to(self.device)
|
67 |
-
tokenizer = tokenizer.to(self.device)
|
68 |
-
|
69 |
-
return clap, tokenizer, args
|
70 |
-
|
71 |
-
def default_collate(self, batch):
|
72 |
-
r"""Puts each data field into a tensor with outer dimension batch size"""
|
73 |
-
elem = batch[0]
|
74 |
-
elem_type = type(elem)
|
75 |
-
if isinstance(elem, torch.Tensor):
|
76 |
-
out = None
|
77 |
-
if torch.utils.data.get_worker_info() is not None:
|
78 |
-
# If we're in a background process, concatenate directly into a
|
79 |
-
# shared memory tensor to avoid an extra copy
|
80 |
-
numel = sum([x.numel() for x in batch])
|
81 |
-
storage = elem.storage()._new_shared(numel)
|
82 |
-
out = elem.new(storage)
|
83 |
-
return torch.stack(batch, 0, out=out)
|
84 |
-
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
|
85 |
-
and elem_type.__name__ != 'string_':
|
86 |
-
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
|
87 |
-
# array of string classes and object
|
88 |
-
if self.np_str_obj_array_pattern.search(elem.dtype.str) is not None:
|
89 |
-
raise TypeError(
|
90 |
-
self.default_collate_err_msg_format.format(elem.dtype))
|
91 |
-
|
92 |
-
return self.default_collate([torch.as_tensor(b) for b in batch])
|
93 |
-
elif elem.shape == (): # scalars
|
94 |
-
return torch.as_tensor(batch)
|
95 |
-
elif isinstance(elem, float):
|
96 |
-
return torch.tensor(batch, dtype=torch.float64)
|
97 |
-
elif isinstance(elem, int):
|
98 |
-
return torch.tensor(batch)
|
99 |
-
elif isinstance(elem, string_classes):
|
100 |
-
return batch
|
101 |
-
elif isinstance(elem, collections.abc.Mapping):
|
102 |
-
return {key: self.default_collate([d[key] for d in batch]) for key in elem}
|
103 |
-
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
|
104 |
-
return elem_type(*(self.default_collate(samples) for samples in zip(*batch)))
|
105 |
-
elif isinstance(elem, collections.abc.Sequence):
|
106 |
-
# check to make sure that the elements in batch have consistent size
|
107 |
-
it = iter(batch)
|
108 |
-
elem_size = len(next(it))
|
109 |
-
if not all(len(elem) == elem_size for elem in it):
|
110 |
-
raise RuntimeError(
|
111 |
-
'each element in list of batch should be of equal size')
|
112 |
-
transposed = zip(*batch)
|
113 |
-
return [self.default_collate(samples) for samples in transposed]
|
114 |
-
|
115 |
-
raise TypeError(self.default_collate_err_msg_format.format(elem_type))
|
116 |
-
|
117 |
-
def load_audio_into_tensor(self, audio_path, audio_duration, resample=False):
|
118 |
-
r"""Loads audio file and returns raw audio."""
|
119 |
-
# Randomly sample a segment of audio_duration from the clip or pad to match duration
|
120 |
-
audio_time_series, sample_rate = torchaudio.load(audio_path)
|
121 |
-
resample_rate = self.args.sampling_rate
|
122 |
-
if resample:
|
123 |
-
resampler = T.Resample(sample_rate, resample_rate)
|
124 |
-
audio_time_series = resampler(audio_time_series)
|
125 |
-
audio_time_series = audio_time_series.reshape(-1)
|
126 |
-
|
127 |
-
# audio_time_series is shorter than predefined audio duration,
|
128 |
-
# so audio_time_series is extended
|
129 |
-
if audio_duration*sample_rate >= audio_time_series.shape[0]:
|
130 |
-
repeat_factor = int(np.ceil((audio_duration*sample_rate) /
|
131 |
-
audio_time_series.shape[0]))
|
132 |
-
# Repeat audio_time_series by repeat_factor to match audio_duration
|
133 |
-
audio_time_series = audio_time_series.repeat(repeat_factor)
|
134 |
-
# remove excess part of audio_time_series
|
135 |
-
audio_time_series = audio_time_series[0:audio_duration*sample_rate]
|
136 |
-
else:
|
137 |
-
# audio_time_series is longer than predefined audio duration,
|
138 |
-
# so audio_time_series is trimmed
|
139 |
-
start_index = random.randrange(
|
140 |
-
audio_time_series.shape[0] - audio_duration*sample_rate)
|
141 |
-
audio_time_series = audio_time_series[start_index:start_index +
|
142 |
-
audio_duration*sample_rate]
|
143 |
-
return torch.FloatTensor(audio_time_series)
|
144 |
-
|
145 |
-
def preprocess_audio(self, audio_files, resample):
|
146 |
-
r"""Load list of audio files and return raw audio"""
|
147 |
-
audio_tensors = []
|
148 |
-
for audio_file in audio_files:
|
149 |
-
audio_tensor = self.load_audio_into_tensor(
|
150 |
-
audio_file, self.args.duration, resample)
|
151 |
-
audio_tensor = audio_tensor.reshape(1, -1).to(self.device)
|
152 |
-
audio_tensors.append(audio_tensor)
|
153 |
-
return self.default_collate(audio_tensors)
|
154 |
-
|
155 |
-
def preprocess_text(self, text_queries, text_len=100):
|
156 |
-
r"""Load list of class labels and return tokenized text"""
|
157 |
-
device = next(self.clap.parameters()).device
|
158 |
-
tokenized_texts = []
|
159 |
-
for ttext in text_queries:
|
160 |
-
tok = self.tokenizer.encode_plus(
|
161 |
-
text=ttext, add_special_tokens=True, max_length=text_len, pad_to_max_length=True, return_tensors="pt")
|
162 |
-
for key in self.token_keys:
|
163 |
-
tok[key] = tok[key].reshape(-1).to(device)
|
164 |
-
tokenized_texts.append(tok)
|
165 |
-
return self.default_collate(tokenized_texts)
|
166 |
-
|
167 |
-
def get_text_embeddings(self, class_labels):
|
168 |
-
r"""Load list of class labels and return text embeddings"""
|
169 |
-
preprocessed_text = self.preprocess_text(class_labels)
|
170 |
-
text_embeddings = self._get_text_embeddings(preprocessed_text)
|
171 |
-
text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
|
172 |
-
return text_embeddings
|
173 |
-
|
174 |
-
def get_audio_embeddings(self, audio_files, resample):
|
175 |
-
r"""Load list of audio files and return a audio embeddings"""
|
176 |
-
preprocessed_audio = self.preprocess_audio(audio_files, resample)
|
177 |
-
audio_embeddings = self._get_audio_embeddings(preprocessed_audio)
|
178 |
-
audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
|
179 |
-
return audio_embeddings
|
180 |
-
|
181 |
-
def _get_text_embeddings(self, preprocessed_text):
|
182 |
-
r"""Load preprocessed text and return text embeddings"""
|
183 |
-
with torch.no_grad():
|
184 |
-
text_embeddings = self.clap.caption_encoder(preprocessed_text)
|
185 |
-
text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
|
186 |
-
return text_embeddings
|
187 |
-
|
188 |
-
def _get_audio_embeddings(self, preprocessed_audio):
|
189 |
-
r"""Load preprocessed audio and return a audio embeddings"""
|
190 |
-
with torch.no_grad():
|
191 |
-
preprocessed_audio = preprocessed_audio.reshape(
|
192 |
-
preprocessed_audio.shape[0], preprocessed_audio.shape[2])
|
193 |
-
#Append [0] the audio emebdding, [1] has output class probabilities
|
194 |
-
audio_embeddings = self.clap.audio_encoder(preprocessed_audio)[0]
|
195 |
-
audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
|
196 |
-
return audio_embeddings
|
197 |
-
|
198 |
-
def compute_similarity(self, audio_embeddings, text_embeddings):
|
199 |
-
r"""Compute similarity between text and audio embeddings"""
|
200 |
-
logit_scale = self.clap.logit_scale.exp()
|
201 |
-
similarity = logit_scale*text_embeddings @ audio_embeddings.T
|
202 |
-
return similarity.T
|
203 |
-
|
204 |
-
def _generic_batch_inference(self, func, *args):
|
205 |
-
r"""Process audio and/or text per batch"""
|
206 |
-
input_tmp = args[0]
|
207 |
-
batch_size = args[-1]
|
208 |
-
# args[0] has audio_files, args[1] has class_labels
|
209 |
-
inputs = [args[0], args[1]] if len(args) == 3 else [args[0]]
|
210 |
-
args0_len = len(args[0])
|
211 |
-
# compute text_embeddings once for all the audio_files batches
|
212 |
-
if len(inputs) == 2:
|
213 |
-
text_embeddings = self.get_text_embeddings(args[1])
|
214 |
-
inputs = [args[0], args[1], text_embeddings]
|
215 |
-
dataset_idx = 0
|
216 |
-
for _ in range(math.ceil(args0_len/batch_size)):
|
217 |
-
next_batch_idx = dataset_idx + batch_size
|
218 |
-
# batch size is bigger than available audio/text items
|
219 |
-
if next_batch_idx >= args0_len:
|
220 |
-
inputs[0] = input_tmp[dataset_idx:]
|
221 |
-
return func(*tuple(inputs))
|
222 |
-
else:
|
223 |
-
inputs[0] = input_tmp[dataset_idx:next_batch_idx]
|
224 |
-
yield func(*tuple(inputs))
|
225 |
-
dataset_idx = next_batch_idx
|
226 |
-
|
227 |
-
def get_audio_embeddings_per_batch(self, audio_files, batch_size):
|
228 |
-
r"""Load preprocessed audio and return a audio embeddings per batch"""
|
229 |
-
return self._generic_batch_inference(self.get_audio_embeddings, audio_files, batch_size)
|
230 |
-
|
231 |
-
def get_text_embeddings_per_batch(self, class_labels, batch_size):
|
232 |
-
r"""Load preprocessed text and return text embeddings per batch"""
|
233 |
-
return self._generic_batch_inference(self.get_text_embeddings, class_labels, batch_size)
|
234 |
-
|
235 |
-
def classify_audio_files_per_batch(self, audio_files, class_labels, batch_size):
|
236 |
-
r"""Compute classification probabilities for each audio recording in a batch and each class label"""
|
237 |
-
return self._generic_batch_inference(self.classify_audio_files, audio_files, class_labels, batch_size)
|
238 |
-
|
239 |
-
if __name__ == '__main__':
|
240 |
-
|
241 |
-
# Load and initialize CLAP
|
242 |
-
weights_path = "/home1/huangrongjie/Project/Diffusion/LatentDiffusion/CLAP/CLAP_weights_2022.pth"
|
243 |
-
clap_model = CLAPWrapper(weights_path, use_cuda=False)
|
244 |
-
|
245 |
-
y = ["A woman talks nearby as water pours", "Multiple clanging and clanking sounds"]
|
246 |
-
x = ['/home2/huangjiawei/data/audiocaps/train/Yr1nicOVtvkQ.wav', '/home2/huangjiawei/data/audiocaps/train/YUDGBjjwyaqE.wav']
|
247 |
-
|
248 |
-
# Computing text embeddings
|
249 |
-
text_embeddings = clap_model.get_text_embeddings(y)
|
250 |
-
|
251 |
-
import ipdb
|
252 |
-
ipdb.set_trace()
|
253 |
-
|
254 |
-
# Computing audio embeddings
|
255 |
-
audio_embeddings = clap_model.get_audio_embeddings(x, resample=True)
|
256 |
-
similarity = clap_model.compute_similarity(audio_embeddings, text_embeddings)
|
257 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py
DELETED
@@ -1,98 +0,0 @@
|
|
1 |
-
_base_ = './yolov7_l_syncbn_fast_8x16b-300e_coco.py'
|
2 |
-
|
3 |
-
# ========================modified parameters========================
|
4 |
-
|
5 |
-
# -----model related-----
|
6 |
-
# Data augmentation
|
7 |
-
max_translate_ratio = 0.1 # YOLOv5RandomAffine
|
8 |
-
scaling_ratio_range = (0.5, 1.6) # YOLOv5RandomAffine
|
9 |
-
mixup_prob = 0.05 # YOLOv5MixUp
|
10 |
-
randchoice_mosaic_prob = [0.8, 0.2]
|
11 |
-
mixup_alpha = 8.0 # YOLOv5MixUp
|
12 |
-
mixup_beta = 8.0 # YOLOv5MixUp
|
13 |
-
|
14 |
-
# -----train val related-----
|
15 |
-
loss_cls_weight = 0.5
|
16 |
-
loss_obj_weight = 1.0
|
17 |
-
|
18 |
-
lr_factor = 0.01 # Learning rate scaling factor
|
19 |
-
# ===============================Unmodified in most cases====================
|
20 |
-
num_classes = _base_.num_classes
|
21 |
-
num_det_layers = _base_.num_det_layers
|
22 |
-
img_scale = _base_.img_scale
|
23 |
-
pre_transform = _base_.pre_transform
|
24 |
-
model = dict(
|
25 |
-
backbone=dict(
|
26 |
-
arch='Tiny', act_cfg=dict(type='LeakyReLU', negative_slope=0.1)),
|
27 |
-
neck=dict(
|
28 |
-
is_tiny_version=True,
|
29 |
-
in_channels=[128, 256, 512],
|
30 |
-
out_channels=[64, 128, 256],
|
31 |
-
block_cfg=dict(
|
32 |
-
_delete_=True, type='TinyDownSampleBlock', middle_ratio=0.25),
|
33 |
-
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
|
34 |
-
use_repconv_outs=False),
|
35 |
-
bbox_head=dict(
|
36 |
-
head_module=dict(in_channels=[128, 256, 512]),
|
37 |
-
loss_cls=dict(loss_weight=loss_cls_weight *
|
38 |
-
(num_classes / 80 * 3 / num_det_layers)),
|
39 |
-
loss_obj=dict(loss_weight=loss_obj_weight *
|
40 |
-
((img_scale[0] / 640)**2 * 3 / num_det_layers))))
|
41 |
-
|
42 |
-
mosiac4_pipeline = [
|
43 |
-
dict(
|
44 |
-
type='Mosaic',
|
45 |
-
img_scale=img_scale,
|
46 |
-
pad_val=114.0,
|
47 |
-
pre_transform=pre_transform),
|
48 |
-
dict(
|
49 |
-
type='YOLOv5RandomAffine',
|
50 |
-
max_rotate_degree=0.0,
|
51 |
-
max_shear_degree=0.0,
|
52 |
-
max_translate_ratio=max_translate_ratio, # change
|
53 |
-
scaling_ratio_range=scaling_ratio_range, # change
|
54 |
-
# img_scale is (width, height)
|
55 |
-
border=(-img_scale[0] // 2, -img_scale[1] // 2),
|
56 |
-
border_val=(114, 114, 114)),
|
57 |
-
]
|
58 |
-
|
59 |
-
mosiac9_pipeline = [
|
60 |
-
dict(
|
61 |
-
type='Mosaic9',
|
62 |
-
img_scale=img_scale,
|
63 |
-
pad_val=114.0,
|
64 |
-
pre_transform=pre_transform),
|
65 |
-
dict(
|
66 |
-
type='YOLOv5RandomAffine',
|
67 |
-
max_rotate_degree=0.0,
|
68 |
-
max_shear_degree=0.0,
|
69 |
-
max_translate_ratio=max_translate_ratio, # change
|
70 |
-
scaling_ratio_range=scaling_ratio_range, # change
|
71 |
-
border=(-img_scale[0] // 2, -img_scale[1] // 2),
|
72 |
-
border_val=(114, 114, 114)),
|
73 |
-
]
|
74 |
-
|
75 |
-
randchoice_mosaic_pipeline = dict(
|
76 |
-
type='RandomChoice',
|
77 |
-
transforms=[mosiac4_pipeline, mosiac9_pipeline],
|
78 |
-
prob=randchoice_mosaic_prob)
|
79 |
-
|
80 |
-
train_pipeline = [
|
81 |
-
*pre_transform,
|
82 |
-
randchoice_mosaic_pipeline,
|
83 |
-
dict(
|
84 |
-
type='YOLOv5MixUp',
|
85 |
-
alpha=mixup_alpha,
|
86 |
-
beta=mixup_beta,
|
87 |
-
prob=mixup_prob, # change
|
88 |
-
pre_transform=[*pre_transform, randchoice_mosaic_pipeline]),
|
89 |
-
dict(type='YOLOv5HSVRandomAug'),
|
90 |
-
dict(type='mmdet.RandomFlip', prob=0.5),
|
91 |
-
dict(
|
92 |
-
type='mmdet.PackDetInputs',
|
93 |
-
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
|
94 |
-
'flip_direction'))
|
95 |
-
]
|
96 |
-
|
97 |
-
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
98 |
-
default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ababababababbababa/Ashaar/langs.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
IMG = """<p align = 'center'>
|
2 |
-
<img src='https://raw.githubusercontent.com/ARBML/Ashaar/master/images/ashaar_icon.png' width='150px' alt='logo for Ashaar'/>
|
3 |
-
</p>
|
4 |
-
|
5 |
-
"""
|
6 |
-
TITLE_ar="""<h1 style="font-size: 30px;" align="center">أَشْعــَـار: تحليل وإنشاء الشعر العربي</h1>"""
|
7 |
-
DESCRIPTION_ar = IMG
|
8 |
-
|
9 |
-
DESCRIPTION_ar +=""" <p dir='rtl'>
|
10 |
-
هذا البرنامج يتيح للمستخدم تحليل وإنشاء الشعر العربي.
|
11 |
-
لإنشاء الشعر العربي تم تدريب نموج يقوم بإستخدام البحر والقافية والعاطفة لإنشاء أكمال للقصيدة بناء على هذه الشروط.
|
12 |
-
بالإضافة إلى نموذج إنشاء الشعر يحتوي البرنامج على نماذج لتصنيف الحقبة الزمنية والعاطفة والبحر و كذلك تشكيل الشعر .
|
13 |
-
يقوم البرنامج بإستخدام هذه النماذج لإيجاد الخلل في القصيدة من خلال إضافة ألوان معينة تدل على اماكن الخلل.
|
14 |
-
لإستخدام البرنامج قم في البداية بكتابة قصيدة تحتوي على عدد زوجي من الأبيات و من ثم قم بالضغط على تحليل ، وبعد إنتهاء التحليل بالإمكان إنشاء إكمال للقصيدة.
|
15 |
-
عند الضغط على زر التحليل يتم إنشاء جدول التحليل الذي يشرح العديد من الأشياء :
|
16 |
-
</p>
|
17 |
-
"""
|
18 |
-
DESCRIPTION_ar+= """<div dir='RTL'>
|
19 |
-
<ul>
|
20 |
-
<li> المشكل : تشكيل كل شطر من القصيدة المدخلة</li>
|
21 |
-
<li>الكتابة العروضية: وتقوم هذه الكتابة على التعبير عن كل منطوق في اللغة وتبيانه حتى لو لم يكن يكتب إملائياً
|
22 |
-
</li>
|
23 |
-
<li>التفعيلة: تفعيلات القصيدة ، مثالاً : طَويلٌ لَهُ دُونَ البُحورِ فضائل فَعُوْلُنْ مَفَاْعِيْلُنْ فَعُوْلُنْ مَفَاْعِلُ
|
24 |
-
</li>
|
25 |
-
<li>النمط: يحدد حركة وسكون كل حرف في الكتابة العروضية. نستخدم الألوان التالية للرمز إلى خلل في الكتابة العروضية: الأحمر: حرف محذوف، الأزرق: حرف مضاف، الأصفر: حركة مقلوبة.</li>
|
26 |
-
</ul>
|
27 |
-
</div>
|
28 |
-
"""
|
29 |
-
DESCRIPTION_ar+= """<p dir='rtl'>
|
30 |
-
قمنا بتوفير الشفرة البرمجية كلها على
|
31 |
-
<a href ='https://github.com/ARBML/Ashaar'> GitHub</a>.
|
32 |
-
</p>
|
33 |
-
"""
|
34 |
-
|
35 |
-
TITLE_en="""<h1 style="font-size: 30px;" align="center">Ashaar: Arabic Poetry Analysis and Generation</h1>"""
|
36 |
-
DESCRIPTION_en = IMG
|
37 |
-
|
38 |
-
DESCRIPTION_en +="""
|
39 |
-
The demo provides a way to generate analysis for poetry and also complete the poetry.
|
40 |
-
The generative model is a character-based conditional GPT-2 model. The pipeline contains many models for
|
41 |
-
classification, diacritization and conditional generation. Check our <a src='https://github.com/ARBML/Ashaar'>GitHub</a> for more techincal details
|
42 |
-
about this work. In the demo we have two basic pipelines. Analyze which predicts the meter, era, theme, diacritized text, qafiyah and, arudi style.
|
43 |
-
The other module, Generate which takes the input text, meter, theme and qafiyah to generate the full poem.
|
44 |
-
"""
|
45 |
-
|
46 |
-
btn_trg_text_ar = "إنشاء"
|
47 |
-
btn_inp_text_ar = "تحليل"
|
48 |
-
|
49 |
-
btn_inp_text_en = "Generate"
|
50 |
-
btn_trg_text_en = "Analyze"
|
51 |
-
|
52 |
-
textbox_inp_text_ar = "القصيدة المدخلة"
|
53 |
-
textbox_trg_text_ar = "القصيدة المنشئة"
|
54 |
-
|
55 |
-
textbox_trg_text_en = "Input Poem"
|
56 |
-
textbox_inp_text_en = "Generated Poem"
|
57 |
-
|
58 |
-
|
59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abdllh/poetry2023/app.py
DELETED
@@ -1,53 +0,0 @@
|
|
1 |
-
import gc
|
2 |
-
import gradio as gr
|
3 |
-
from transformers import pipeline, set_seed
|
4 |
-
|
5 |
-
pipe = pipeline('text-generation', framework='pt', model='akhooli/ap2023', tokenizer='akhooli/ap2023')
|
6 |
-
#gc.collect()
|
7 |
-
samples = [['أنت'
|
8 |
-
,1.0, 50, 1.0, 1.0, 114],['هل غادر'
|
9 |
-
,1.0, 50, 1.0, 1.0, 114 ],['ألا ليت'
|
10 |
-
,1.0, 50, 1.0, 1.0, 114 ],['يا قدس'
|
11 |
-
,1.0, 50, 1.0, 1.0, 114],['عيد بأية حال'
|
12 |
-
,1.0, 50, 1.0, 1.0, 114],['لكل شيء إذا ما'
|
13 |
-
,1.0, 50, 1.0, 1.0, 114 ],['.'
|
14 |
-
,1.0, 50, 1.0, 1.0, 114]]
|
15 |
-
|
16 |
-
notes = """
|
17 |
-
- Enter a short prompt or select (click) one of the examples and click SEND
|
18 |
-
- Adjust parameters (temperture, top k, top p and penalty) through the slider (keep close to default values).
|
19 |
-
- For the same seed (randomness), the same output is regenerated if other parameters are fixed
|
20 |
-
- Clear and enter new prompt or select another example and SEND to regenerate
|
21 |
-
- The '.' means start a new line from no prompt (your prompt need not be long)
|
22 |
-
- Be patient: this runs on CPU (free tier)
|
23 |
-
- Feedback (Twitter): @akhooli (https://twitter.com/akhooli/status/1611025232201977859)
|
24 |
-
- Note/Disclaimer: may generate unaccepted or inappropriate content. Use at your own risk.
|
25 |
-
"""
|
26 |
-
def sayPoetry(prompt, temp=1.0, topk = 50, topp = 1.0, penalty=1.0, seed=114):
|
27 |
-
if not int(seed) >= 0: seed=114
|
28 |
-
set_seed(seed)
|
29 |
-
gen = pipe(prompt, max_length=96, do_sample=True, temperature=temp, top_k=topk, top_p=topp, repetition_penalty=penalty,
|
30 |
-
min_length = 64, no_repeat_ngram_size = 3, return_full_text=True,
|
31 |
-
num_beams=5, num_return_sequences=1)[0]["generated_text"]
|
32 |
-
poetry =""
|
33 |
-
for line in gen.split('.')[:-1]:
|
34 |
-
poetry += line #+ "\n"
|
35 |
-
return poetry
|
36 |
-
poetry = gr.Interface(fn=sayPoetry,
|
37 |
-
inputs=[
|
38 |
-
gr.Textbox(label="Enter short prompt or select from examples:"),
|
39 |
-
gr.Slider(0.70, 1.2, step=0.01,value=1.0, label='control temperature'),
|
40 |
-
gr.Slider(25, 100, step=1,value=50, label='control top k'),
|
41 |
-
gr.Slider(0.80, 1.0, step=0.01,value=1.0, label='control top p'),
|
42 |
-
gr.Slider(0.90, 1.50, step=0.01,value=1.0, label='control penalty'),
|
43 |
-
gr.Number(value=139750, precision=0, label='Seed'),
|
44 |
-
],
|
45 |
-
outputs=[gr.Textbox(label="Generated Poetry:")],
|
46 |
-
|
47 |
-
allow_flagging='never',
|
48 |
-
title='Arabic Poetry Generation Demo (updated Jan. 2023)',
|
49 |
-
description = "A simple demo of AI generated poetry based on 1M poems fine-tuned using AraGPT2 (be patient, runs on cpu)",
|
50 |
-
examples=samples,
|
51 |
-
cache_examples=False,
|
52 |
-
article = notes)
|
53 |
-
poetry.launch() # show_error = True, debug=True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AbeShinzo0708/AI_Kishida_Fumio_speaker/hooks/hook-librosa.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
from PyInstaller.utils.hooks import copy_metadata
|
2 |
-
|
3 |
-
datas = copy_metadata('librosa')
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/gridalign-plugin.js
DELETED
@@ -1,26 +0,0 @@
|
|
1 |
-
import {
|
2 |
-
HexagonGridAlign,
|
3 |
-
QuadGridAlign
|
4 |
-
} from './gridalign.js';
|
5 |
-
|
6 |
-
class GridAlignPlugin extends Phaser.Plugins.BasePlugin {
|
7 |
-
|
8 |
-
constructor(pluginManager) {
|
9 |
-
super(pluginManager);
|
10 |
-
}
|
11 |
-
|
12 |
-
start() {
|
13 |
-
var eventEmitter = this.game.events;
|
14 |
-
eventEmitter.on('destroy', this.destroy, this);
|
15 |
-
}
|
16 |
-
|
17 |
-
hexagon(items, options) {
|
18 |
-
return HexagonGridAlign(items, options);
|
19 |
-
}
|
20 |
-
|
21 |
-
quad(items, options) {
|
22 |
-
return QuadGridAlign(items, options);
|
23 |
-
}
|
24 |
-
}
|
25 |
-
|
26 |
-
export default GridAlignPlugin;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/Bejeweled.d.ts
DELETED
@@ -1,192 +0,0 @@
|
|
1 |
-
import ComponentBase from '../../plugins/utils/componentbase/ComponentBase';
|
2 |
-
import Board from '../../plugins/board/board/Board';
|
3 |
-
import Match from '../../plugins/board/match/Match';
|
4 |
-
import MoveTo from '../../plugins/board/moveto/MoveTo';
|
5 |
-
import { TileXYType } from '../../plugins/board/types/Position';
|
6 |
-
|
7 |
-
export default Bejeweled;
|
8 |
-
|
9 |
-
declare namespace Bejeweled {
|
10 |
-
|
11 |
-
type ChessSymbol = number | string;
|
12 |
-
|
13 |
-
type GenerateSymbolCallbackType = (
|
14 |
-
board: Board,
|
15 |
-
tileX: number, tileY: number,
|
16 |
-
excluded: undefined | ChessSymbol[]
|
17 |
-
) => ChessSymbol;
|
18 |
-
|
19 |
-
type CreateChessCallbackType = (
|
20 |
-
board: Board
|
21 |
-
) => Phaser.GameObjects.GameObject;
|
22 |
-
|
23 |
-
type SwapActionType = (
|
24 |
-
chess1: Phaser.GameObjects.GameObject,
|
25 |
-
chess2: Phaser.GameObjects.GameObject,
|
26 |
-
board: Board,
|
27 |
-
bejeweled: Bejeweled,
|
28 |
-
) => void;
|
29 |
-
|
30 |
-
type EliminatingActionType = (
|
31 |
-
chessArray: Phaser.GameObjects.GameObject[],
|
32 |
-
board: Board,
|
33 |
-
bejeweled: Bejeweled,
|
34 |
-
) => void;
|
35 |
-
|
36 |
-
type FallingActionType = (
|
37 |
-
board: Board,
|
38 |
-
bejeweled: Bejeweled,
|
39 |
-
) => void;
|
40 |
-
|
41 |
-
interface IConfig {
|
42 |
-
rexBoard?: string,
|
43 |
-
|
44 |
-
board: Board.IConfig,
|
45 |
-
match?: Match.IConfig,
|
46 |
-
|
47 |
-
chess: {
|
48 |
-
symbols: ChessSymbol[] | GenerateSymbolCallbackType,
|
49 |
-
|
50 |
-
create: CreateChessCallbackType,
|
51 |
-
|
52 |
-
scope?: object,
|
53 |
-
|
54 |
-
moveTo?: MoveTo.IConfig,
|
55 |
-
|
56 |
-
tileZ?: number | string,
|
57 |
-
},
|
58 |
-
|
59 |
-
swapAction?: SwapActionType,
|
60 |
-
|
61 |
-
undoSwapAction?: SwapActionType,
|
62 |
-
|
63 |
-
eliminatingAction?: EliminatingActionType,
|
64 |
-
|
65 |
-
fallingAction?: FallingActionType,
|
66 |
-
|
67 |
-
input?: boolean,
|
68 |
-
|
69 |
-
mask?: boolean,
|
70 |
-
|
71 |
-
debug?: boolean,
|
72 |
-
|
73 |
-
}
|
74 |
-
|
75 |
-
namespace Events {
|
76 |
-
type Select1CallbackType = (board: Board, bejeweled: Bejeweled) => void;
|
77 |
-
|
78 |
-
type Select2CallbackType = (board: Board, bejeweled: Bejeweled) => void;
|
79 |
-
|
80 |
-
type SwapCallbackType = (
|
81 |
-
selectedChess1: Phaser.GameObjects.GameObject,
|
82 |
-
selectedChess2: Phaser.GameObjects.GameObject,
|
83 |
-
board: Board, bejeweled: Bejeweled
|
84 |
-
) => void;
|
85 |
-
|
86 |
-
type MatchStartCallbackType = (board: Board, bejeweled: Bejeweled) => void;
|
87 |
-
|
88 |
-
type MatchCallbackType = (
|
89 |
-
lines: Phaser.Structs.Set<Phaser.GameObjects.GameObject>[],
|
90 |
-
board: Board, bejeweled: Bejeweled
|
91 |
-
) => void;
|
92 |
-
|
93 |
-
type EliminateCallbackType = (
|
94 |
-
chessArray: Phaser.GameObjects.GameObject[],
|
95 |
-
board: Board, bejeweled: Bejeweled
|
96 |
-
) => void;
|
97 |
-
|
98 |
-
type FallCallbackType = (board: Board, bejeweled: Bejeweled) => void;
|
99 |
-
|
100 |
-
type FillCallbackType = (board: Board, bejeweled: Bejeweled) => void;
|
101 |
-
|
102 |
-
type MatchEndCallbackType = (board: Board, bejeweled: Bejeweled) => void;
|
103 |
-
|
104 |
-
type UndoSwapCallbackType = (
|
105 |
-
selectedChess1: Phaser.GameObjects.GameObject,
|
106 |
-
selectedChess2: Phaser.GameObjects.GameObject,
|
107 |
-
board: Board, bejeweled: Bejeweled
|
108 |
-
) => void;
|
109 |
-
|
110 |
-
type SetDataCallback = (
|
111 |
-
bejeweled: Bejeweled,
|
112 |
-
key: string, value: any
|
113 |
-
) => void;
|
114 |
-
|
115 |
-
type ChangeetAnyDataCallback = (
|
116 |
-
bejeweled: Bejeweled,
|
117 |
-
key: string, value: any, previousValue: any
|
118 |
-
) => void;
|
119 |
-
|
120 |
-
type ChangeetDataCallback = (
|
121 |
-
bejeweled: Bejeweled,
|
122 |
-
value: any, previousValue: any
|
123 |
-
) => void;
|
124 |
-
}
|
125 |
-
}
|
126 |
-
|
127 |
-
declare class Bejeweled extends ComponentBase {
|
128 |
-
constructor(
|
129 |
-
scene: Phaser.Scene,
|
130 |
-
config?: Bejeweled.IConfig
|
131 |
-
);
|
132 |
-
|
133 |
-
start(): this;
|
134 |
-
|
135 |
-
setInputEnable(enable?: boolean): this;
|
136 |
-
|
137 |
-
worldXYToChess(
|
138 |
-
worldX: number,
|
139 |
-
worldY: number
|
140 |
-
): Phaser.GameObjects.GameObject;
|
141 |
-
|
142 |
-
tileXYToChess(
|
143 |
-
tileX: number,
|
144 |
-
tileY: number
|
145 |
-
): Phaser.GameObjects.GameObject;
|
146 |
-
|
147 |
-
getNeighborChessAtAngle(
|
148 |
-
chess: Phaser.GameObjects.GameObject | TileXYType,
|
149 |
-
angle: number
|
150 |
-
): Phaser.GameObjects.GameObject;
|
151 |
-
|
152 |
-
getNeighborChessAtDirection(
|
153 |
-
chess: Phaser.GameObjects.GameObject | TileXYType,
|
154 |
-
direction: number
|
155 |
-
): Phaser.GameObjects.GameObject;
|
156 |
-
|
157 |
-
selectChess1(
|
158 |
-
chess: Phaser.GameObjects.GameObject
|
159 |
-
): this;
|
160 |
-
getSelectedChess1(): Phaser.GameObjects.GameObject;
|
161 |
-
|
162 |
-
selectChess2(
|
163 |
-
chess: Phaser.GameObjects.GameObject
|
164 |
-
): this;
|
165 |
-
getSelectedChess2(): Phaser.GameObjects.GameObject;
|
166 |
-
|
167 |
-
getChessMoveTo(
|
168 |
-
chess: Phaser.GameObjects.GameObject
|
169 |
-
): MoveTo | undefined;
|
170 |
-
|
171 |
-
getChessTileZ(): number | string;
|
172 |
-
|
173 |
-
getBoard(): Board;
|
174 |
-
getMatch(): Match;
|
175 |
-
|
176 |
-
// Custom eliminateChess, falling action
|
177 |
-
waitEvent(
|
178 |
-
eventEmitter: Phaser.Events.EventEmitter,
|
179 |
-
eventName?: string
|
180 |
-
): this;
|
181 |
-
isWaitingEvent(): boolean;
|
182 |
-
|
183 |
-
// Data manager
|
184 |
-
setDataEnabled(): this;
|
185 |
-
setData(key: string, value: any): this;
|
186 |
-
incData(key: string, value: number): this;
|
187 |
-
toggleData(key: string): this;
|
188 |
-
getData(key: string): any;
|
189 |
-
data: Phaser.Data.DataManager;
|
190 |
-
|
191 |
-
|
192 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/grid/Grid.js
DELETED
@@ -1,60 +0,0 @@
|
|
1 |
-
import Base from '../base/Base.js';
|
2 |
-
import { Circle } from '../utils/Geoms.js';
|
3 |
-
import Yoyo from '../utils/Yoyo.js';
|
4 |
-
|
5 |
-
|
6 |
-
const Linear = Phaser.Math.Linear;
|
7 |
-
const RowNum = 3;
|
8 |
-
const ColNum = 3;
|
9 |
-
|
10 |
-
class Grid extends Base {
|
11 |
-
constructor(scene, config) {
|
12 |
-
super(scene, config);
|
13 |
-
this.type = 'rexSpinnerGrid';
|
14 |
-
}
|
15 |
-
|
16 |
-
buildShapes() {
|
17 |
-
var cnt = RowNum * ColNum;
|
18 |
-
for (var i = 0; i < cnt; i++) {
|
19 |
-
var dot = new Circle();
|
20 |
-
this.addShape(dot);
|
21 |
-
|
22 |
-
dot.setData('offset', Math.random());
|
23 |
-
}
|
24 |
-
}
|
25 |
-
|
26 |
-
updateShapes() {
|
27 |
-
var centerX = this.centerX;
|
28 |
-
var centerY = this.centerY;
|
29 |
-
var radius = this.radius;
|
30 |
-
var isSizeChanged = this.isSizeChanged;
|
31 |
-
|
32 |
-
var leftBound = centerX - radius;
|
33 |
-
var topBound = centerY - radius;
|
34 |
-
var cellWidth = (radius * 2) / ColNum;
|
35 |
-
var cellHeight = (radius * 2) / RowNum;
|
36 |
-
var maxDotRadius = (Math.min(cellWidth, cellHeight) / 2) * 0.8;
|
37 |
-
|
38 |
-
|
39 |
-
var shapes = this.getShapes();
|
40 |
-
for (var i = 0, cnt = shapes.length; i < cnt; i++) {
|
41 |
-
var colIdx = (i % ColNum);
|
42 |
-
var rowIdx = Math.floor(i / RowNum);
|
43 |
-
var x = leftBound + cellWidth * (colIdx + 0.5);
|
44 |
-
var y = topBound + cellHeight * (rowIdx + 0.5);
|
45 |
-
|
46 |
-
var dot = shapes[i];
|
47 |
-
var t = (this.value + dot.getData('offset')) % 1;
|
48 |
-
t = Yoyo(t);
|
49 |
-
dot.fillStyle(this.color, Linear(0.25, 1, t));
|
50 |
-
|
51 |
-
if (isSizeChanged) {
|
52 |
-
dot
|
53 |
-
.setRadius(maxDotRadius)
|
54 |
-
.setCenterPosition(x, y)
|
55 |
-
}
|
56 |
-
}
|
57 |
-
}
|
58 |
-
}
|
59 |
-
|
60 |
-
export default Grid;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/index.d.ts
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
import Maker from './Maker';
|
2 |
-
import Make from './Make';
|
3 |
-
import YAMLMake from './YAMLMake';
|
4 |
-
import Builders from './builders/Builders';
|
5 |
-
|
6 |
-
|
7 |
-
export {
|
8 |
-
Maker,
|
9 |
-
Make,
|
10 |
-
YAMLMake,
|
11 |
-
Builders,
|
12 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/ninepatch/NinePatch.d.ts
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import NinePatch from '../../../plugins/ninepatch';
|
2 |
-
export default NinePatch;
|
|
|
|
|
|
spaces/AiMimicry/sovits-models/modules/__init__.py
DELETED
File without changes
|
spaces/Amon1/ChatGPTForAcadamic/project_self_analysis.md
DELETED
@@ -1,175 +0,0 @@
|
|
1 |
-
# chatgpt-academic项目自译解报告
|
2 |
-
(Author补充:以下分析均由本项目调用ChatGPT一键生成,如果有不准确的地方,全怪GPT😄)
|
3 |
-
|
4 |
-
## [0/18] 程序摘要: functional_crazy.py
|
5 |
-
|
6 |
-
这是一个功能扩展的程序,文件名为 `functional_crazy.py`。代码的主要功能是通过提供一系列函数插件,增强程序的功能,让用户可以通过界面中的按钮,快速调用对应的函数插件实现相应的操作。代码中使用了 `HotReload` 函数插件,可以在不重启程序的情况下更新函数插件的代码,让其生效。同时,通过 `UserVisibleLevel` 变量的设置,可以控制哪些插件会在UI界面显示出来。函数插件列表包括了以下功能:解析项目本身、解析一个Python项目、解析一个C++项目头文件、解析一个C++项目、读取文章并生成摘要、批量生成函数注释、全项目切换成英文、批量总结PDF文档、批量总结PDF文档pdfminer、批量总结Word文档、高阶功能模板函数、以及其他未经充分测试的函数插件。
|
7 |
-
|
8 |
-
## [1/18] 程序摘要: main.py
|
9 |
-
|
10 |
-
该程序是一个基于Gradio构建的对话生成模型的Web界面示例,包含了以下主要功能:
|
11 |
-
|
12 |
-
1.加载模型并对用户输入进行响应;
|
13 |
-
2.通过调用外部函数库来获取用户的输入,并在模型生成的过程中进行处理;
|
14 |
-
3.支持用户上传本地文件,供外部函数库调用;
|
15 |
-
4.支持停止当前的生成过程;
|
16 |
-
5.保存用户的历史记录,并将其记录在本地日志文件中,以供后续分析和使用。
|
17 |
-
|
18 |
-
该程序需要依赖于一些外部库和软件包,如Gradio、torch等。用户需要确保这些依赖项已经安装,并且在运行该程序前对config_private.py配置文件进行相应的修改。
|
19 |
-
|
20 |
-
## [2/18] 程序摘要: functional.py
|
21 |
-
|
22 |
-
该文件定义了一个名为“functional”的函数,函数的作用是返回一个包含多个字典(键值对)的字典,每个键值对表示一种功能。该字典的键值由功能名称和对应的数据组成。其中的每个字典都包含4个键值对,分别为“Prefix”、“Suffix”、“Color”和“PreProcess”,分别表示前缀、后缀、按钮颜色和预处理函数。如果某些键值对没有给出,那么程序中默认相应的值,如按钮颜色默认为“secondary”等。每个功能描述了不同的学术润色/翻译/其他服务,如“英语学术润色”、“中文学术润色”、“查找语法错误”等。函数还引用了一个名为“clear_line_break”的函数,用于预处理修改前的文本。
|
23 |
-
|
24 |
-
## [3/18] 程序摘要: show_math.py
|
25 |
-
|
26 |
-
该程序文件名为show_math.py,主要用途是将Markdown和LaTeX混合格式转换成带有MathML的HTML格式。该程序通过递归地处理LaTeX和Markdown混合段落逐一转换成HTML/MathML标记出来,并在LaTeX公式创建中进行错误处理。在程序文件中定义了3个变量,分别是incomplete,convError和convert,其中convert函数是用来执行转换的主要函数。程序使用正则表达式进行LaTeX格式和Markdown段落的分割,从而实现转换。如果在Latex转换过程中发生错误,程序将输出相应的错误信息。
|
27 |
-
|
28 |
-
## [4/18] 程序摘要: predict.py
|
29 |
-
|
30 |
-
本程序文件的文件名为"./predict.py",主要包含三个函数:
|
31 |
-
|
32 |
-
1. predict:正常对话时使用,具备完备的交互功能,不可多线程;
|
33 |
-
2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑;
|
34 |
-
3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程。
|
35 |
-
|
36 |
-
其中,predict函数用于基础的对话功能,发送至chatGPT,流式获取输出,根据点击的哪个按钮,进行对话预处理等额外操作;predict_no_ui函数用于payload比较大的情况,或者用于实现多线、带嵌套的复杂功能;predict_no_ui_long_connection实现调用predict_no_ui处理长文档时,避免连接断掉的情况,支持多线程。
|
37 |
-
|
38 |
-
## [5/18] 程序摘要: check_proxy.py
|
39 |
-
|
40 |
-
该程序文件名为check_proxy.py,主要功能是检查代理服务器的可用性并返回代理服务器的地理位置信息或错误提示。具体实现方式如下:
|
41 |
-
|
42 |
-
首先使用requests模块向指定网站(https://ipapi.co/json/)发送GET请求,请求结果以JSON格式返回。如果代理服务器参数(proxies)是有效的且没有指明'https'代理,则用默认字典值'无'替代。
|
43 |
-
|
44 |
-
然后,程序会解析返回的JSON数据,并根据数据中是否包含国家名字字段来判断代理服务器的地理位置。如果有国家名字字段,则将其打印出来并返回代理服务器的相关信息。如果没有国家名字字段,但有错误信息字段,则返回其他错误提示信息。
|
45 |
-
|
46 |
-
在程序执行前,程序会先设置环境变量no_proxy,并使用toolbox模块中的get_conf函数从配置文件中读取代理参数。
|
47 |
-
|
48 |
-
最后,检测程序会输出检查结果并返回对应的结果字符串。
|
49 |
-
|
50 |
-
## [6/18] 程序摘要: config_private.py
|
51 |
-
|
52 |
-
本程序文件名为`config_private.py`,其功能为配置私有信息以便在主程序中使用。主要功能包括:
|
53 |
-
|
54 |
-
- 配置OpenAI API的密钥和API URL
|
55 |
-
- 配置是否使用代理,如果使用代理配置代理地址和端口
|
56 |
-
- 配置发送请求的超时时间和失败重试次数的限制
|
57 |
-
- 配置并行使用线程数和用户名密码
|
58 |
-
- 提供检查功能以确保API密钥已经正确设置
|
59 |
-
|
60 |
-
其中,需要特别注意的是:最后一个检查功能要求在运行之前必须将API密钥正确设置,否则程序会直接退出。
|
61 |
-
|
62 |
-
## [7/18] 程序摘要: config.py
|
63 |
-
|
64 |
-
该程序文件是一个配置文件,用于配置OpenAI的API参数和优化体验的相关参数,具体包括以下几个步骤:
|
65 |
-
|
66 |
-
1.设置OpenAI的API密钥。
|
67 |
-
|
68 |
-
2.选择是否使用代理,如果使用则需要设置代理地址和端口等参数。
|
69 |
-
|
70 |
-
3.设置请求OpenAI后的超时时间、网页的端口、重试次数、选择的OpenAI模型、API的网址等。
|
71 |
-
|
72 |
-
4.设置并行使用的线程数和用户名密码。
|
73 |
-
|
74 |
-
该程序文件的作用为在使用OpenAI API时进行相关参数的配置,以保证请求的正确性和速度,并且优化使用体验。
|
75 |
-
|
76 |
-
## [8/18] 程序摘要: theme.py
|
77 |
-
|
78 |
-
该程序是一个自定义Gradio主题的Python模块。主题文件名为"./theme.py"。程序引入了Gradio模块,并定义了一个名为"adjust_theme()"的函数。该函数根据输入值调整Gradio的默认主题,返回一个包含所需自定义属性的主题对象。主题属性包括颜色、字体、过渡、阴影、按钮边框和渐变等。主题颜色列表包括石板色、灰色、锌色、中性色、石头色、红色、橙色、琥珀色、黄色、酸橙色、绿色、祖母绿、青蓝色、青色、天蓝色、蓝色、靛蓝色、紫罗兰色、紫色、洋红色、粉红色和玫瑰色。如果Gradio版本较旧,则不能自定义字体和颜色。
|
79 |
-
|
80 |
-
## [9/18] 程序摘要: toolbox.py
|
81 |
-
|
82 |
-
该程序文件包含了一系列函数,用于实现聊天程序所需的各种功能,如预测对话、将对话记录写入文件、将普通文本转换为Markdown格式文本、装饰器函数CatchException和HotReload等。其中一些函数用到了第三方库,如Python-Markdown、mdtex2html、zipfile、tarfile、rarfile和py7zr。除此之外,还有一些辅助函数,如get_conf、clear_line_break和extract_archive等。主要功能包括:
|
83 |
-
|
84 |
-
1. 导入markdown、mdtex2html、threading、functools等模块。
|
85 |
-
2. 定义函数predict_no_ui_but_counting_down,用于生成对话。
|
86 |
-
3. 定义函数write_results_to_file,用于将对话记录生成Markdown文件。
|
87 |
-
4. 定义函数regular_txt_to_markdown,将普通文本转换为Markdown格式的文本。
|
88 |
-
5. 定义装饰器函数CatchException,用于捕获函数执行异常并返回生成器。
|
89 |
-
6. 定义函数report_execption,用于向chatbot中添加错误信息。
|
90 |
-
7. 定义函数text_divide_paragraph,用于将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
|
91 |
-
8. 定义函数markdown_convertion,用于将Markdown格式的文本转换为HTML格式。
|
92 |
-
9. 定义函数format_io,用于将输入和输出解析为HTML格式。
|
93 |
-
10. 定义函数find_free_port,用于返回当前系统中可用的未使用端口。
|
94 |
-
11. 定义函数extract_archive,用于解压归档文件。
|
95 |
-
12. 定义函数find_recent_files,用于查找最近创建的文件。
|
96 |
-
13. 定义函数on_file_uploaded,用于处理上传文件的操作。
|
97 |
-
14. 定义函数on_report_generated,用于处理生成报告文件的操作。
|
98 |
-
|
99 |
-
|
100 |
-
## [10/18] 程序摘要: crazy_functions/生成函数注释.py
|
101 |
-
|
102 |
-
该程序文件是一个Python脚本,文件名为“生成函数注释.py”,位于“./crazy_functions/”目录下。该程序实现了一个批量生成函数注释的功能,可以对指定文件夹下的所有Python和C++源代码文件中的所有函数进行注释,使用Markdown表格输出注释结果。
|
103 |
-
|
104 |
-
该程序引用了predict.py和toolbox.py两个模块,其中predict.py实现了一个基于GPT模型的文本生成功能,用于生成函数注释,而toolbox.py实现了一些工具函数,包括异常处理函数、文本写入函数等。另外,该程序还定义了两个函数,一个是“生成函数注释”函数,用于处理单个文件的注释生成;另一个是“批量生成函数注释”函数,用于批量处理多个文件的注释生成。
|
105 |
-
|
106 |
-
## [11/18] 程序摘要: crazy_functions/读文章写摘要.py
|
107 |
-
|
108 |
-
这个程序文件是一个名为“读文章写摘要”的函数。该函数的输入包括文章的文本内容、top_p(生成文本时选择最可能的词语的概率阈值)、temperature(控制生成文本的随机性的因子)、对话历史等参数,以及一个聊天机器人和一个系统提示的文本。该函数的主要工作是解析一组.tex文件,���后生成一段学术性语言的中文和英文摘要。在解析过程中,该函数使用一个名为“toolbox”的模块中的辅助函数和一个名为“predict”的模块中的函数来执行GPT-2模型的推理工作,然后将结果返回给聊天机器人。另外,该程序还包括一个名为“fast_debug”的bool型变量,用于调试和测试。
|
109 |
-
|
110 |
-
## [12/18] 程序摘要: crazy_functions/代码重写为全英文_多线程.py
|
111 |
-
|
112 |
-
该程序文件实现了一个多线程操作,用于将指定目录下的所有 Python 文件中的中文转化为英文,并将转化后的文件存入另一个目录中。具体实现过程如下:
|
113 |
-
|
114 |
-
1. 集合目标文件路径并清空历史记录。
|
115 |
-
2. 循环目标文件,对每个文件启动一个线程进行任务操作。
|
116 |
-
3. 各个线程同时开始执行任务函数,并在任务完成后将转化后的文件写入指定目录,最终生成一份任务执行报告。
|
117 |
-
|
118 |
-
## [13/18] 程序摘要: crazy_functions/高级功能函数模板.py
|
119 |
-
|
120 |
-
该程序文件名为高级功能函数模板.py,它包含了一个名为“高阶功能模板函数”的函数,这个函数可以作为开发新功能函数的模板。该函数引用了predict.py和toolbox.py文件中的函数。在该函数内部,它首先清空了历史记录,然后对于今天和今天以后的四天,它问用户历史中哪些事件发生在这些日期,并列举两条事件并发送相关的图片。在向用户询问问题时,使用了GPT进行响应。由于请求GPT需要一定的时间,所以函数会在重新显示状态之前等待一段时间。在每次与用户的互动中,使用yield关键字生成器函数来输出聊天机器人的当前状态,包括聊天消息、历史记录和状态('正常')。最后,程序调用write_results_to_file函数将聊天的结果写入文件,以供后续的评估和分析。
|
121 |
-
|
122 |
-
## [14/18] 程序摘要: crazy_functions/总结word文档.py
|
123 |
-
|
124 |
-
该程序文件名为总结word文档.py,主要功能是批量总结Word文档。具体实现过程是解析docx格式和doc格式文件,生成文件内容,然后使用自然语言处理工具对文章内容做中英文概述,最后给出建议。该程序需要依赖python-docx和pywin32,如果没有安装,会给出安装建议。
|
125 |
-
|
126 |
-
## [15/18] 程序摘要: crazy_functions/批量总结PDF文档pdfminer.py
|
127 |
-
|
128 |
-
该程序文件名为pdfminer.py,位于./crazy_functions/目录下。程序实现了批量读取PDF文件,并使用pdfminer解析PDF文件内容。此外,程序还根据解析得到的文本内容,调用机器学习模型生成对每篇文章的概述,最终生成全文摘要。程序中还对模块依赖进行了导入检查,若缺少依赖,则会提供安装建议。
|
129 |
-
|
130 |
-
## [16/18] 程序摘要: crazy_functions/解析项目源代码.py
|
131 |
-
|
132 |
-
这个程序文件中包含了几个函数,分别是:
|
133 |
-
|
134 |
-
1. `解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)`:通过输入文件路径列表对程序文件进行逐文件分析,根据分析结果做出整体功能和构架的概括,并生成包括每个文件功能的markdown表格。
|
135 |
-
2. `解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对当前文件夹下的所有Python文件及其子文件夹进行逐文件分析,并生成markdown表格。
|
136 |
-
3. `解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对指定路径下的所有Python文件及其子文件夹进行逐文件分析,并生成markdown表格。
|
137 |
-
4. `解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对指定路径下的所有头文件进行逐文件分析,并生成markdown表格。
|
138 |
-
5. `解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对指定路径下的所有.h、.cpp、.c文件及其子文件夹进行逐文件分析,并生成markdown表格。
|
139 |
-
|
140 |
-
程序中还包含了一些辅助函数和变量,如CatchException装饰器函数,report_execption函数、write_results_to_file函数等。在执行过程中还会调用其他模块中的函数,如toolbox模块的函数和predict模块的函数。
|
141 |
-
|
142 |
-
## [17/18] 程序摘要: crazy_functions/批量总结PDF文档.py
|
143 |
-
|
144 |
-
这个程序文件是一个名为“批量总结PDF文档”的函数插件。它导入了predict和toolbox模块,并定义了一些函数,包括is_paragraph_break,normalize_text和clean_text。这些函数是对输入文本进行预处理和清洗的功能函数。主要的功能函数是解析PDF,它打开每个PDF文件并将其内容存储在file_content变量中,然后传递给聊天机器人,以产生一句话的概括。在解析PDF文件之后,该函数连接了所有文件的摘要,以产生一段学术语言和英文摘要。最后,函数批量处理目标文件夹中的所有PDF文件,并输出结果。
|
145 |
-
|
146 |
-
## 根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能。
|
147 |
-
|
148 |
-
该程序是一个聊天机器人,使用了OpenAI的GPT语言模型以及一些特殊的辅助功能去处理各种学术写作和科研润色任务。整个程序由一些函数组成,每个函数都代表了不同的学术润色/翻译/其他服务。
|
149 |
-
|
150 |
-
下面是程序中每个文件的功能列表:
|
151 |
-
|
152 |
-
| 文件名 | 功能 |
|
153 |
-
|--------|--------|
|
154 |
-
| functional_crazy.py | 实现高级功能函数模板和其他一些辅助功能函数 |
|
155 |
-
| main.py | 程序的主要入口,负责程序的启动和UI的展示 |
|
156 |
-
| functional.py | 定义各种功能按钮的颜色和响应函数 |
|
157 |
-
| show_math.py | 解析LaTeX文本,将其转换为Markdown格式 |
|
158 |
-
| predict.py | 基础的对话功能,用于与chatGPT进行交互 |
|
159 |
-
| check_proxy.py | 检查代理设置的正确性 |
|
160 |
-
| config_private.py | 配置程序的API密钥和其他私有信息 |
|
161 |
-
| config.py | 配置OpenAI的API参数和程序的其他属性 |
|
162 |
-
| theme.py | 设置程序主题样式 |
|
163 |
-
| toolbox.py | 存放一些辅助函数供程序使用 |
|
164 |
-
| crazy_functions/生成函数注释.py | 生成Python文件中所有函数的注释 |
|
165 |
-
| crazy_functions/读文章写摘要.py | 解析文章文本,生成中英文摘要 |
|
166 |
-
| crazy_functions/代码重写为全英文_多线程.py | 将中文代码内容转化为英文 |
|
167 |
-
| crazy_functions/高级功能函数模板.py | 实现高级功能函数模板 |
|
168 |
-
| crazy_functions/总结word文档.py | 解析Word文件,生成文章内容的概要 |
|
169 |
-
| crazy_functions/批量总结PDF文档pdfminer.py | 解析PDF文件,生成文章内容的概要(使用pdfminer库) |
|
170 |
-
| crazy_functions/批量总结PDF文档.py | 解析PDF文件,生成文章内容的概要(使用PyMuPDF库) |
|
171 |
-
| crazy_functions/解析项目源代码.py | 解析C/C++源代码,生成markdown表格 |
|
172 |
-
| crazy_functions/批量总结PDF文档.py | 对PDF文件进行批量摘要生成 |
|
173 |
-
|
174 |
-
总的来说,该程序提供了一系列的学术润色和翻译的工具,支持对各种类型的文件进行分析和处理。同时也提供了对话式用户界面,便于用户使用和交互。
|
175 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/train_dreambooth_lora.py
DELETED
@@ -1,1418 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python
|
2 |
-
# coding=utf-8
|
3 |
-
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
|
4 |
-
#
|
5 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
# you may not use this file except in compliance with the License.
|
7 |
-
# You may obtain a copy of the License at
|
8 |
-
#
|
9 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
#
|
11 |
-
# Unless required by applicable law or agreed to in writing, software
|
12 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
# See the License for the specific language governing permissions and
|
15 |
-
|
16 |
-
import argparse
|
17 |
-
import gc
|
18 |
-
import hashlib
|
19 |
-
import itertools
|
20 |
-
import logging
|
21 |
-
import math
|
22 |
-
import os
|
23 |
-
import shutil
|
24 |
-
import warnings
|
25 |
-
from pathlib import Path
|
26 |
-
from typing import Dict
|
27 |
-
|
28 |
-
import numpy as np
|
29 |
-
import torch
|
30 |
-
import torch.nn.functional as F
|
31 |
-
import torch.utils.checkpoint
|
32 |
-
import transformers
|
33 |
-
from accelerate import Accelerator
|
34 |
-
from accelerate.logging import get_logger
|
35 |
-
from accelerate.utils import ProjectConfiguration, set_seed
|
36 |
-
from huggingface_hub import create_repo, upload_folder
|
37 |
-
from packaging import version
|
38 |
-
from PIL import Image
|
39 |
-
from PIL.ImageOps import exif_transpose
|
40 |
-
from torch.utils.data import Dataset
|
41 |
-
from torchvision import transforms
|
42 |
-
from tqdm.auto import tqdm
|
43 |
-
from transformers import AutoTokenizer, PretrainedConfig
|
44 |
-
|
45 |
-
import diffusers
|
46 |
-
from diffusers import (
|
47 |
-
AutoencoderKL,
|
48 |
-
DDPMScheduler,
|
49 |
-
DiffusionPipeline,
|
50 |
-
DPMSolverMultistepScheduler,
|
51 |
-
StableDiffusionPipeline,
|
52 |
-
UNet2DConditionModel,
|
53 |
-
)
|
54 |
-
from diffusers.loaders import (
|
55 |
-
LoraLoaderMixin,
|
56 |
-
text_encoder_lora_state_dict,
|
57 |
-
)
|
58 |
-
from diffusers.models.attention_processor import (
|
59 |
-
AttnAddedKVProcessor,
|
60 |
-
AttnAddedKVProcessor2_0,
|
61 |
-
LoRAAttnAddedKVProcessor,
|
62 |
-
LoRAAttnProcessor,
|
63 |
-
LoRAAttnProcessor2_0,
|
64 |
-
SlicedAttnAddedKVProcessor,
|
65 |
-
)
|
66 |
-
from diffusers.optimization import get_scheduler
|
67 |
-
from diffusers.utils import check_min_version, is_wandb_available
|
68 |
-
from diffusers.utils.import_utils import is_xformers_available
|
69 |
-
|
70 |
-
|
71 |
-
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
72 |
-
check_min_version("0.19.0")
|
73 |
-
|
74 |
-
logger = get_logger(__name__)
|
75 |
-
|
76 |
-
|
77 |
-
def save_model_card(
|
78 |
-
repo_id: str,
|
79 |
-
images=None,
|
80 |
-
base_model=str,
|
81 |
-
train_text_encoder=False,
|
82 |
-
prompt=str,
|
83 |
-
repo_folder=None,
|
84 |
-
pipeline: DiffusionPipeline = None,
|
85 |
-
):
|
86 |
-
img_str = ""
|
87 |
-
for i, image in enumerate(images):
|
88 |
-
image.save(os.path.join(repo_folder, f"image_{i}.png"))
|
89 |
-
img_str += f"\n"
|
90 |
-
|
91 |
-
yaml = f"""
|
92 |
-
---
|
93 |
-
license: creativeml-openrail-m
|
94 |
-
base_model: {base_model}
|
95 |
-
instance_prompt: {prompt}
|
96 |
-
tags:
|
97 |
-
- {'stable-diffusion' if isinstance(pipeline, StableDiffusionPipeline) else 'if'}
|
98 |
-
- {'stable-diffusion-diffusers' if isinstance(pipeline, StableDiffusionPipeline) else 'if-diffusers'}
|
99 |
-
- text-to-image
|
100 |
-
- diffusers
|
101 |
-
- lora
|
102 |
-
inference: true
|
103 |
-
---
|
104 |
-
"""
|
105 |
-
model_card = f"""
|
106 |
-
# LoRA DreamBooth - {repo_id}
|
107 |
-
|
108 |
-
These are LoRA adaption weights for {base_model}. The weights were trained on {prompt} using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. \n
|
109 |
-
{img_str}
|
110 |
-
|
111 |
-
LoRA for the text encoder was enabled: {train_text_encoder}.
|
112 |
-
"""
|
113 |
-
with open(os.path.join(repo_folder, "README.md"), "w") as f:
|
114 |
-
f.write(yaml + model_card)
|
115 |
-
|
116 |
-
|
117 |
-
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
|
118 |
-
text_encoder_config = PretrainedConfig.from_pretrained(
|
119 |
-
pretrained_model_name_or_path,
|
120 |
-
subfolder="text_encoder",
|
121 |
-
revision=revision,
|
122 |
-
)
|
123 |
-
model_class = text_encoder_config.architectures[0]
|
124 |
-
|
125 |
-
if model_class == "CLIPTextModel":
|
126 |
-
from transformers import CLIPTextModel
|
127 |
-
|
128 |
-
return CLIPTextModel
|
129 |
-
elif model_class == "RobertaSeriesModelWithTransformation":
|
130 |
-
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
|
131 |
-
|
132 |
-
return RobertaSeriesModelWithTransformation
|
133 |
-
elif model_class == "T5EncoderModel":
|
134 |
-
from transformers import T5EncoderModel
|
135 |
-
|
136 |
-
return T5EncoderModel
|
137 |
-
else:
|
138 |
-
raise ValueError(f"{model_class} is not supported.")
|
139 |
-
|
140 |
-
|
141 |
-
def parse_args(input_args=None):
|
142 |
-
parser = argparse.ArgumentParser(description="Simple example of a training script.")
|
143 |
-
parser.add_argument(
|
144 |
-
"--pretrained_model_name_or_path",
|
145 |
-
type=str,
|
146 |
-
default=None,
|
147 |
-
required=True,
|
148 |
-
help="Path to pretrained model or model identifier from huggingface.co/models.",
|
149 |
-
)
|
150 |
-
parser.add_argument(
|
151 |
-
"--revision",
|
152 |
-
type=str,
|
153 |
-
default=None,
|
154 |
-
required=False,
|
155 |
-
help="Revision of pretrained model identifier from huggingface.co/models.",
|
156 |
-
)
|
157 |
-
parser.add_argument(
|
158 |
-
"--tokenizer_name",
|
159 |
-
type=str,
|
160 |
-
default=None,
|
161 |
-
help="Pretrained tokenizer name or path if not the same as model_name",
|
162 |
-
)
|
163 |
-
parser.add_argument(
|
164 |
-
"--instance_data_dir",
|
165 |
-
type=str,
|
166 |
-
default=None,
|
167 |
-
required=True,
|
168 |
-
help="A folder containing the training data of instance images.",
|
169 |
-
)
|
170 |
-
parser.add_argument(
|
171 |
-
"--class_data_dir",
|
172 |
-
type=str,
|
173 |
-
default=None,
|
174 |
-
required=False,
|
175 |
-
help="A folder containing the training data of class images.",
|
176 |
-
)
|
177 |
-
parser.add_argument(
|
178 |
-
"--instance_prompt",
|
179 |
-
type=str,
|
180 |
-
default=None,
|
181 |
-
required=True,
|
182 |
-
help="The prompt with identifier specifying the instance",
|
183 |
-
)
|
184 |
-
parser.add_argument(
|
185 |
-
"--class_prompt",
|
186 |
-
type=str,
|
187 |
-
default=None,
|
188 |
-
help="The prompt to specify images in the same class as provided instance images.",
|
189 |
-
)
|
190 |
-
parser.add_argument(
|
191 |
-
"--validation_prompt",
|
192 |
-
type=str,
|
193 |
-
default=None,
|
194 |
-
help="A prompt that is used during validation to verify that the model is learning.",
|
195 |
-
)
|
196 |
-
parser.add_argument(
|
197 |
-
"--num_validation_images",
|
198 |
-
type=int,
|
199 |
-
default=4,
|
200 |
-
help="Number of images that should be generated during validation with `validation_prompt`.",
|
201 |
-
)
|
202 |
-
parser.add_argument(
|
203 |
-
"--validation_epochs",
|
204 |
-
type=int,
|
205 |
-
default=50,
|
206 |
-
help=(
|
207 |
-
"Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
|
208 |
-
" `args.validation_prompt` multiple times: `args.num_validation_images`."
|
209 |
-
),
|
210 |
-
)
|
211 |
-
parser.add_argument(
|
212 |
-
"--with_prior_preservation",
|
213 |
-
default=False,
|
214 |
-
action="store_true",
|
215 |
-
help="Flag to add prior preservation loss.",
|
216 |
-
)
|
217 |
-
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
|
218 |
-
parser.add_argument(
|
219 |
-
"--num_class_images",
|
220 |
-
type=int,
|
221 |
-
default=100,
|
222 |
-
help=(
|
223 |
-
"Minimal class images for prior preservation loss. If there are not enough images already present in"
|
224 |
-
" class_data_dir, additional images will be sampled with class_prompt."
|
225 |
-
),
|
226 |
-
)
|
227 |
-
parser.add_argument(
|
228 |
-
"--output_dir",
|
229 |
-
type=str,
|
230 |
-
default="lora-dreambooth-model",
|
231 |
-
help="The output directory where the model predictions and checkpoints will be written.",
|
232 |
-
)
|
233 |
-
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
|
234 |
-
parser.add_argument(
|
235 |
-
"--resolution",
|
236 |
-
type=int,
|
237 |
-
default=512,
|
238 |
-
help=(
|
239 |
-
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
|
240 |
-
" resolution"
|
241 |
-
),
|
242 |
-
)
|
243 |
-
parser.add_argument(
|
244 |
-
"--center_crop",
|
245 |
-
default=False,
|
246 |
-
action="store_true",
|
247 |
-
help=(
|
248 |
-
"Whether to center crop the input images to the resolution. If not set, the images will be randomly"
|
249 |
-
" cropped. The images will be resized to the resolution first before cropping."
|
250 |
-
),
|
251 |
-
)
|
252 |
-
parser.add_argument(
|
253 |
-
"--train_text_encoder",
|
254 |
-
action="store_true",
|
255 |
-
help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
|
256 |
-
)
|
257 |
-
parser.add_argument(
|
258 |
-
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
|
259 |
-
)
|
260 |
-
parser.add_argument(
|
261 |
-
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
|
262 |
-
)
|
263 |
-
parser.add_argument("--num_train_epochs", type=int, default=1)
|
264 |
-
parser.add_argument(
|
265 |
-
"--max_train_steps",
|
266 |
-
type=int,
|
267 |
-
default=None,
|
268 |
-
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
|
269 |
-
)
|
270 |
-
parser.add_argument(
|
271 |
-
"--checkpointing_steps",
|
272 |
-
type=int,
|
273 |
-
default=500,
|
274 |
-
help=(
|
275 |
-
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
|
276 |
-
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
|
277 |
-
" training using `--resume_from_checkpoint`."
|
278 |
-
),
|
279 |
-
)
|
280 |
-
parser.add_argument(
|
281 |
-
"--checkpoints_total_limit",
|
282 |
-
type=int,
|
283 |
-
default=None,
|
284 |
-
help=("Max number of checkpoints to store."),
|
285 |
-
)
|
286 |
-
parser.add_argument(
|
287 |
-
"--resume_from_checkpoint",
|
288 |
-
type=str,
|
289 |
-
default=None,
|
290 |
-
help=(
|
291 |
-
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
|
292 |
-
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
|
293 |
-
),
|
294 |
-
)
|
295 |
-
parser.add_argument(
|
296 |
-
"--gradient_accumulation_steps",
|
297 |
-
type=int,
|
298 |
-
default=1,
|
299 |
-
help="Number of updates steps to accumulate before performing a backward/update pass.",
|
300 |
-
)
|
301 |
-
parser.add_argument(
|
302 |
-
"--gradient_checkpointing",
|
303 |
-
action="store_true",
|
304 |
-
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
|
305 |
-
)
|
306 |
-
parser.add_argument(
|
307 |
-
"--learning_rate",
|
308 |
-
type=float,
|
309 |
-
default=5e-4,
|
310 |
-
help="Initial learning rate (after the potential warmup period) to use.",
|
311 |
-
)
|
312 |
-
parser.add_argument(
|
313 |
-
"--scale_lr",
|
314 |
-
action="store_true",
|
315 |
-
default=False,
|
316 |
-
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
|
317 |
-
)
|
318 |
-
parser.add_argument(
|
319 |
-
"--lr_scheduler",
|
320 |
-
type=str,
|
321 |
-
default="constant",
|
322 |
-
help=(
|
323 |
-
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
|
324 |
-
' "constant", "constant_with_warmup"]'
|
325 |
-
),
|
326 |
-
)
|
327 |
-
parser.add_argument(
|
328 |
-
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
|
329 |
-
)
|
330 |
-
parser.add_argument(
|
331 |
-
"--lr_num_cycles",
|
332 |
-
type=int,
|
333 |
-
default=1,
|
334 |
-
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
|
335 |
-
)
|
336 |
-
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
|
337 |
-
parser.add_argument(
|
338 |
-
"--dataloader_num_workers",
|
339 |
-
type=int,
|
340 |
-
default=0,
|
341 |
-
help=(
|
342 |
-
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
|
343 |
-
),
|
344 |
-
)
|
345 |
-
parser.add_argument(
|
346 |
-
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
|
347 |
-
)
|
348 |
-
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
|
349 |
-
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
|
350 |
-
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
|
351 |
-
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
|
352 |
-
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
|
353 |
-
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
|
354 |
-
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
|
355 |
-
parser.add_argument(
|
356 |
-
"--hub_model_id",
|
357 |
-
type=str,
|
358 |
-
default=None,
|
359 |
-
help="The name of the repository to keep in sync with the local `output_dir`.",
|
360 |
-
)
|
361 |
-
parser.add_argument(
|
362 |
-
"--logging_dir",
|
363 |
-
type=str,
|
364 |
-
default="logs",
|
365 |
-
help=(
|
366 |
-
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
|
367 |
-
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
|
368 |
-
),
|
369 |
-
)
|
370 |
-
parser.add_argument(
|
371 |
-
"--allow_tf32",
|
372 |
-
action="store_true",
|
373 |
-
help=(
|
374 |
-
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
|
375 |
-
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
|
376 |
-
),
|
377 |
-
)
|
378 |
-
parser.add_argument(
|
379 |
-
"--report_to",
|
380 |
-
type=str,
|
381 |
-
default="tensorboard",
|
382 |
-
help=(
|
383 |
-
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
|
384 |
-
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
|
385 |
-
),
|
386 |
-
)
|
387 |
-
parser.add_argument(
|
388 |
-
"--mixed_precision",
|
389 |
-
type=str,
|
390 |
-
default=None,
|
391 |
-
choices=["no", "fp16", "bf16"],
|
392 |
-
help=(
|
393 |
-
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
|
394 |
-
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
|
395 |
-
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
|
396 |
-
),
|
397 |
-
)
|
398 |
-
parser.add_argument(
|
399 |
-
"--prior_generation_precision",
|
400 |
-
type=str,
|
401 |
-
default=None,
|
402 |
-
choices=["no", "fp32", "fp16", "bf16"],
|
403 |
-
help=(
|
404 |
-
"Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
|
405 |
-
" 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
|
406 |
-
),
|
407 |
-
)
|
408 |
-
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
|
409 |
-
parser.add_argument(
|
410 |
-
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
|
411 |
-
)
|
412 |
-
parser.add_argument(
|
413 |
-
"--pre_compute_text_embeddings",
|
414 |
-
action="store_true",
|
415 |
-
help="Whether or not to pre-compute text embeddings. If text embeddings are pre-computed, the text encoder will not be kept in memory during training and will leave more GPU memory available for training the rest of the model. This is not compatible with `--train_text_encoder`.",
|
416 |
-
)
|
417 |
-
parser.add_argument(
|
418 |
-
"--tokenizer_max_length",
|
419 |
-
type=int,
|
420 |
-
default=None,
|
421 |
-
required=False,
|
422 |
-
help="The maximum length of the tokenizer. If not set, will default to the tokenizer's max length.",
|
423 |
-
)
|
424 |
-
parser.add_argument(
|
425 |
-
"--text_encoder_use_attention_mask",
|
426 |
-
action="store_true",
|
427 |
-
required=False,
|
428 |
-
help="Whether to use attention mask for the text encoder",
|
429 |
-
)
|
430 |
-
parser.add_argument(
|
431 |
-
"--validation_images",
|
432 |
-
required=False,
|
433 |
-
default=None,
|
434 |
-
nargs="+",
|
435 |
-
help="Optional set of images to use for validation. Used when the target pipeline takes an initial image as input such as when training image variation or superresolution.",
|
436 |
-
)
|
437 |
-
parser.add_argument(
|
438 |
-
"--class_labels_conditioning",
|
439 |
-
required=False,
|
440 |
-
default=None,
|
441 |
-
help="The optional `class_label` conditioning to pass to the unet, available values are `timesteps`.",
|
442 |
-
)
|
443 |
-
parser.add_argument(
|
444 |
-
"--rank",
|
445 |
-
type=int,
|
446 |
-
default=4,
|
447 |
-
help=("The dimension of the LoRA update matrices."),
|
448 |
-
)
|
449 |
-
|
450 |
-
if input_args is not None:
|
451 |
-
args = parser.parse_args(input_args)
|
452 |
-
else:
|
453 |
-
args = parser.parse_args()
|
454 |
-
|
455 |
-
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
|
456 |
-
if env_local_rank != -1 and env_local_rank != args.local_rank:
|
457 |
-
args.local_rank = env_local_rank
|
458 |
-
|
459 |
-
if args.with_prior_preservation:
|
460 |
-
if args.class_data_dir is None:
|
461 |
-
raise ValueError("You must specify a data directory for class images.")
|
462 |
-
if args.class_prompt is None:
|
463 |
-
raise ValueError("You must specify prompt for class images.")
|
464 |
-
else:
|
465 |
-
# logger is not available yet
|
466 |
-
if args.class_data_dir is not None:
|
467 |
-
warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
|
468 |
-
if args.class_prompt is not None:
|
469 |
-
warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
|
470 |
-
|
471 |
-
if args.train_text_encoder and args.pre_compute_text_embeddings:
|
472 |
-
raise ValueError("`--train_text_encoder` cannot be used with `--pre_compute_text_embeddings`")
|
473 |
-
|
474 |
-
return args
|
475 |
-
|
476 |
-
|
477 |
-
class DreamBoothDataset(Dataset):
    """
    A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
    It pre-processes the images and tokenizes the prompts.
    """

    def __init__(
        self,
        instance_data_root,
        instance_prompt,
        tokenizer,
        class_data_root=None,
        class_prompt=None,
        class_num=None,
        size=512,
        center_crop=False,
        encoder_hidden_states=None,
        instance_prompt_encoder_hidden_states=None,
        tokenizer_max_length=None,
    ):
        # Target resolution and crop strategy used by the image transforms below.
        self.size = size
        self.center_crop = center_crop
        self.tokenizer = tokenizer
        # Optional pre-computed text embeddings. When set, __getitem__ returns
        # them directly instead of tokenizing the prompts.
        self.encoder_hidden_states = encoder_hidden_states
        self.instance_prompt_encoder_hidden_states = instance_prompt_encoder_hidden_states
        self.tokenizer_max_length = tokenizer_max_length

        self.instance_data_root = Path(instance_data_root)
        if not self.instance_data_root.exists():
            raise ValueError("Instance images root doesn't exists.")

        # Every file in the instance directory is treated as a training image.
        self.instance_images_path = list(Path(instance_data_root).iterdir())
        self.num_instance_images = len(self.instance_images_path)
        self.instance_prompt = instance_prompt
        self._length = self.num_instance_images

        if class_data_root is not None:
            # Prior-preservation mode: also serve class images alongside the
            # instance images.
            self.class_data_root = Path(class_data_root)
            self.class_data_root.mkdir(parents=True, exist_ok=True)
            self.class_images_path = list(self.class_data_root.iterdir())
            if class_num is not None:
                # Cap the number of class images actually used.
                self.num_class_images = min(len(self.class_images_path), class_num)
            else:
                self.num_class_images = len(self.class_images_path)
            # Length covers the larger of the two sets; the smaller set is
            # cycled via modulo indexing in __getitem__.
            self._length = max(self.num_class_images, self.num_instance_images)
            self.class_prompt = class_prompt
        else:
            self.class_data_root = None

        # Resize -> crop -> tensor -> normalize to [-1, 1] (mean/std 0.5).
        self.image_transforms = transforms.Compose(
            [
                transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
                transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ]
        )

    def __len__(self):
        return self._length

    def __getitem__(self, index):
        """Return a dict with the transformed instance image and prompt ids,
        plus the class image and prompt ids when prior preservation is on."""
        example = {}
        # Modulo so indices beyond the instance set wrap around.
        instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
        # Apply any EXIF orientation tag before further processing.
        instance_image = exif_transpose(instance_image)

        if not instance_image.mode == "RGB":
            instance_image = instance_image.convert("RGB")
        example["instance_images"] = self.image_transforms(instance_image)

        if self.encoder_hidden_states is not None:
            # Pre-computed embeddings take the place of token ids; note that no
            # attention mask is emitted in this branch.
            example["instance_prompt_ids"] = self.encoder_hidden_states
        else:
            text_inputs = tokenize_prompt(
                self.tokenizer, self.instance_prompt, tokenizer_max_length=self.tokenizer_max_length
            )
            example["instance_prompt_ids"] = text_inputs.input_ids
            example["instance_attention_mask"] = text_inputs.attention_mask

        if self.class_data_root:
            class_image = Image.open(self.class_images_path[index % self.num_class_images])
            class_image = exif_transpose(class_image)

            if not class_image.mode == "RGB":
                class_image = class_image.convert("RGB")
            example["class_images"] = self.image_transforms(class_image)

            if self.instance_prompt_encoder_hidden_states is not None:
                example["class_prompt_ids"] = self.instance_prompt_encoder_hidden_states
            else:
                class_text_inputs = tokenize_prompt(
                    self.tokenizer, self.class_prompt, tokenizer_max_length=self.tokenizer_max_length
                )
                example["class_prompt_ids"] = class_text_inputs.input_ids
                example["class_attention_mask"] = class_text_inputs.attention_mask

        return example
|
574 |
-
|
575 |
-
|
576 |
-
def collate_fn(examples, with_prior_preservation=False):
    """Collate DreamBooth dataset examples into a training batch.

    Stacks pixel values into a float tensor and concatenates prompt token ids
    (and attention masks, when present) along the batch dimension. With prior
    preservation, class examples are appended after the instance examples so a
    single forward pass covers both.

    Args:
        examples: list of dicts produced by ``DreamBoothDataset.__getitem__``.
        with_prior_preservation: if True, also fold in the ``class_*`` entries.

    Returns:
        dict with ``input_ids``, ``pixel_values`` and, when the examples carry
        masks, ``attention_mask`` — all torch tensors.
    """
    has_attention_mask = "instance_attention_mask" in examples[0]

    input_ids = [example["instance_prompt_ids"] for example in examples]
    pixel_values = [example["instance_images"] for example in examples]

    if has_attention_mask:
        attention_mask = [example["instance_attention_mask"] for example in examples]

    # Concat class and instance examples for prior preservation.
    # We do this to avoid doing two forward passes.
    if with_prior_preservation:
        input_ids += [example["class_prompt_ids"] for example in examples]
        pixel_values += [example["class_images"] for example in examples]
        if has_attention_mask:
            attention_mask += [example["class_attention_mask"] for example in examples]

    pixel_values = torch.stack(pixel_values)
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()

    input_ids = torch.cat(input_ids, dim=0)

    batch = {
        "input_ids": input_ids,
        "pixel_values": pixel_values,
    }

    if has_attention_mask:
        # Fix: concatenate into a single tensor like input_ids. Previously the
        # raw Python list was stored, which breaks downstream code that calls
        # ``batch["attention_mask"].to(device)``.
        batch["attention_mask"] = torch.cat(attention_mask, dim=0)

    return batch
|
607 |
-
|
608 |
-
|
609 |
-
class PromptDataset(Dataset):
    """A trivial dataset that yields the same prompt ``num_samples`` times.

    Used to fan out class-image generation across multiple GPUs: each item
    carries the prompt plus its index so the generated files can be named
    and counted deterministically.
    """

    def __init__(self, prompt, num_samples):
        self.prompt = prompt
        self.num_samples = num_samples

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        return {"prompt": self.prompt, "index": index}
|
624 |
-
|
625 |
-
|
626 |
-
def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None):
    """Tokenize ``prompt`` with fixed-length padding and truncation.

    Uses ``tokenizer_max_length`` when provided, otherwise the tokenizer's own
    ``model_max_length``. Returns the tokenizer output with PyTorch tensors.
    """
    max_length = tokenizer_max_length if tokenizer_max_length is not None else tokenizer.model_max_length

    return tokenizer(
        prompt,
        truncation=True,
        padding="max_length",
        max_length=max_length,
        return_tensors="pt",
    )
|
641 |
-
|
642 |
-
|
643 |
-
def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_attention_mask=None):
    """Run the text encoder on ``input_ids`` and return its first output
    (the last hidden states).

    The attention mask is only forwarded when
    ``text_encoder_use_attention_mask`` is truthy; otherwise the encoder is
    called without one.
    """
    text_input_ids = input_ids.to(text_encoder.device)

    mask = attention_mask.to(text_encoder.device) if text_encoder_use_attention_mask else None

    outputs = text_encoder(text_input_ids, attention_mask=mask)
    return outputs[0]
|
658 |
-
|
659 |
-
|
660 |
-
def unet_attn_processors_state_dict(unet) -> Dict[str, torch.Tensor]:
    r"""
    Build a flat state dict containing just the attention processor parameters.

    Keys are of the form ``"<attn processor name>.<parameter name>"`` so the
    result can be saved and re-loaded as LoRA layer weights.

    Note: annotation fixed from ``torch.tensor`` (a factory function) to the
    actual type ``torch.Tensor``.

    Returns:
        a state dict containing just the attention processor parameters.
    """
    attn_processors_state_dict = {}

    for attn_processor_key, attn_processor in unet.attn_processors.items():
        for parameter_key, parameter in attn_processor.state_dict().items():
            attn_processors_state_dict[f"{attn_processor_key}.{parameter_key}"] = parameter

    return attn_processors_state_dict
|
674 |
-
|
675 |
-
|
676 |
-
def main(args):
|
677 |
-
logging_dir = Path(args.output_dir, args.logging_dir)
|
678 |
-
|
679 |
-
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
|
680 |
-
|
681 |
-
accelerator = Accelerator(
|
682 |
-
gradient_accumulation_steps=args.gradient_accumulation_steps,
|
683 |
-
mixed_precision=args.mixed_precision,
|
684 |
-
log_with=args.report_to,
|
685 |
-
project_config=accelerator_project_config,
|
686 |
-
)
|
687 |
-
|
688 |
-
if args.report_to == "wandb":
|
689 |
-
if not is_wandb_available():
|
690 |
-
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
|
691 |
-
import wandb
|
692 |
-
|
693 |
-
# Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
|
694 |
-
# This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
|
695 |
-
# TODO (sayakpaul): Remove this check when gradient accumulation with two models is enabled in accelerate.
|
696 |
-
if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
|
697 |
-
raise ValueError(
|
698 |
-
"Gradient accumulation is not supported when training the text encoder in distributed training. "
|
699 |
-
"Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
|
700 |
-
)
|
701 |
-
|
702 |
-
# Make one log on every process with the configuration for debugging.
|
703 |
-
logging.basicConfig(
|
704 |
-
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
705 |
-
datefmt="%m/%d/%Y %H:%M:%S",
|
706 |
-
level=logging.INFO,
|
707 |
-
)
|
708 |
-
logger.info(accelerator.state, main_process_only=False)
|
709 |
-
if accelerator.is_local_main_process:
|
710 |
-
transformers.utils.logging.set_verbosity_warning()
|
711 |
-
diffusers.utils.logging.set_verbosity_info()
|
712 |
-
else:
|
713 |
-
transformers.utils.logging.set_verbosity_error()
|
714 |
-
diffusers.utils.logging.set_verbosity_error()
|
715 |
-
|
716 |
-
# If passed along, set the training seed now.
|
717 |
-
if args.seed is not None:
|
718 |
-
set_seed(args.seed)
|
719 |
-
|
720 |
-
# Generate class images if prior preservation is enabled.
|
721 |
-
if args.with_prior_preservation:
|
722 |
-
class_images_dir = Path(args.class_data_dir)
|
723 |
-
if not class_images_dir.exists():
|
724 |
-
class_images_dir.mkdir(parents=True)
|
725 |
-
cur_class_images = len(list(class_images_dir.iterdir()))
|
726 |
-
|
727 |
-
if cur_class_images < args.num_class_images:
|
728 |
-
torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
|
729 |
-
if args.prior_generation_precision == "fp32":
|
730 |
-
torch_dtype = torch.float32
|
731 |
-
elif args.prior_generation_precision == "fp16":
|
732 |
-
torch_dtype = torch.float16
|
733 |
-
elif args.prior_generation_precision == "bf16":
|
734 |
-
torch_dtype = torch.bfloat16
|
735 |
-
pipeline = DiffusionPipeline.from_pretrained(
|
736 |
-
args.pretrained_model_name_or_path,
|
737 |
-
torch_dtype=torch_dtype,
|
738 |
-
safety_checker=None,
|
739 |
-
revision=args.revision,
|
740 |
-
)
|
741 |
-
pipeline.set_progress_bar_config(disable=True)
|
742 |
-
|
743 |
-
num_new_images = args.num_class_images - cur_class_images
|
744 |
-
logger.info(f"Number of class images to sample: {num_new_images}.")
|
745 |
-
|
746 |
-
sample_dataset = PromptDataset(args.class_prompt, num_new_images)
|
747 |
-
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
|
748 |
-
|
749 |
-
sample_dataloader = accelerator.prepare(sample_dataloader)
|
750 |
-
pipeline.to(accelerator.device)
|
751 |
-
|
752 |
-
for example in tqdm(
|
753 |
-
sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
|
754 |
-
):
|
755 |
-
images = pipeline(example["prompt"]).images
|
756 |
-
|
757 |
-
for i, image in enumerate(images):
|
758 |
-
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
|
759 |
-
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
|
760 |
-
image.save(image_filename)
|
761 |
-
|
762 |
-
del pipeline
|
763 |
-
if torch.cuda.is_available():
|
764 |
-
torch.cuda.empty_cache()
|
765 |
-
|
766 |
-
# Handle the repository creation
|
767 |
-
if accelerator.is_main_process:
|
768 |
-
if args.output_dir is not None:
|
769 |
-
os.makedirs(args.output_dir, exist_ok=True)
|
770 |
-
|
771 |
-
if args.push_to_hub:
|
772 |
-
repo_id = create_repo(
|
773 |
-
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
|
774 |
-
).repo_id
|
775 |
-
|
776 |
-
# Load the tokenizer
|
777 |
-
if args.tokenizer_name:
|
778 |
-
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
|
779 |
-
elif args.pretrained_model_name_or_path:
|
780 |
-
tokenizer = AutoTokenizer.from_pretrained(
|
781 |
-
args.pretrained_model_name_or_path,
|
782 |
-
subfolder="tokenizer",
|
783 |
-
revision=args.revision,
|
784 |
-
use_fast=False,
|
785 |
-
)
|
786 |
-
|
787 |
-
# import correct text encoder class
|
788 |
-
text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
|
789 |
-
|
790 |
-
# Load scheduler and models
|
791 |
-
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
|
792 |
-
text_encoder = text_encoder_cls.from_pretrained(
|
793 |
-
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
|
794 |
-
)
|
795 |
-
try:
|
796 |
-
vae = AutoencoderKL.from_pretrained(
|
797 |
-
args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
|
798 |
-
)
|
799 |
-
except OSError:
|
800 |
-
# IF does not have a VAE so let's just set it to None
|
801 |
-
# We don't have to error out here
|
802 |
-
vae = None
|
803 |
-
|
804 |
-
unet = UNet2DConditionModel.from_pretrained(
|
805 |
-
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
|
806 |
-
)
|
807 |
-
|
808 |
-
# We only train the additional adapter LoRA layers
|
809 |
-
if vae is not None:
|
810 |
-
vae.requires_grad_(False)
|
811 |
-
text_encoder.requires_grad_(False)
|
812 |
-
unet.requires_grad_(False)
|
813 |
-
|
814 |
-
# For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
|
815 |
-
# as these weights are only used for inference, keeping weights in full precision is not required.
|
816 |
-
weight_dtype = torch.float32
|
817 |
-
if accelerator.mixed_precision == "fp16":
|
818 |
-
weight_dtype = torch.float16
|
819 |
-
elif accelerator.mixed_precision == "bf16":
|
820 |
-
weight_dtype = torch.bfloat16
|
821 |
-
|
822 |
-
# Move unet, vae and text_encoder to device and cast to weight_dtype
|
823 |
-
unet.to(accelerator.device, dtype=weight_dtype)
|
824 |
-
if vae is not None:
|
825 |
-
vae.to(accelerator.device, dtype=weight_dtype)
|
826 |
-
text_encoder.to(accelerator.device, dtype=weight_dtype)
|
827 |
-
|
828 |
-
if args.enable_xformers_memory_efficient_attention:
|
829 |
-
if is_xformers_available():
|
830 |
-
import xformers
|
831 |
-
|
832 |
-
xformers_version = version.parse(xformers.__version__)
|
833 |
-
if xformers_version == version.parse("0.0.16"):
|
834 |
-
logger.warn(
|
835 |
-
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
|
836 |
-
)
|
837 |
-
unet.enable_xformers_memory_efficient_attention()
|
838 |
-
else:
|
839 |
-
raise ValueError("xformers is not available. Make sure it is installed correctly")
|
840 |
-
|
841 |
-
# now we will add new LoRA weights to the attention layers
|
842 |
-
# It's important to realize here how many attention weights will be added and of which sizes
|
843 |
-
# The sizes of the attention layers consist only of two different variables:
|
844 |
-
# 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`.
|
845 |
-
# 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`.
|
846 |
-
|
847 |
-
# Let's first see how many attention processors we will have to set.
|
848 |
-
# For Stable Diffusion, it should be equal to:
|
849 |
-
# - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12
|
850 |
-
# - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2
|
851 |
-
# - up blocks (2x attention layers) * (3x transformer layers) * (3x down blocks) = 18
|
852 |
-
# => 32 layers
|
853 |
-
|
854 |
-
# Set correct lora layers
|
855 |
-
unet_lora_attn_procs = {}
|
856 |
-
unet_lora_parameters = []
|
857 |
-
for name, attn_processor in unet.attn_processors.items():
|
858 |
-
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
|
859 |
-
if name.startswith("mid_block"):
|
860 |
-
hidden_size = unet.config.block_out_channels[-1]
|
861 |
-
elif name.startswith("up_blocks"):
|
862 |
-
block_id = int(name[len("up_blocks.")])
|
863 |
-
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
|
864 |
-
elif name.startswith("down_blocks"):
|
865 |
-
block_id = int(name[len("down_blocks.")])
|
866 |
-
hidden_size = unet.config.block_out_channels[block_id]
|
867 |
-
|
868 |
-
if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)):
|
869 |
-
lora_attn_processor_class = LoRAAttnAddedKVProcessor
|
870 |
-
else:
|
871 |
-
lora_attn_processor_class = (
|
872 |
-
LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
|
873 |
-
)
|
874 |
-
|
875 |
-
module = lora_attn_processor_class(
|
876 |
-
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=args.rank
|
877 |
-
)
|
878 |
-
unet_lora_attn_procs[name] = module
|
879 |
-
unet_lora_parameters.extend(module.parameters())
|
880 |
-
|
881 |
-
unet.set_attn_processor(unet_lora_attn_procs)
|
882 |
-
|
883 |
-
# The text encoder comes from 🤗 transformers, so we cannot directly modify it.
|
884 |
-
# So, instead, we monkey-patch the forward calls of its attention-blocks.
|
885 |
-
if args.train_text_encoder:
|
886 |
-
# ensure that dtype is float32, even if rest of the model that isn't trained is loaded in fp16
|
887 |
-
text_lora_parameters = LoraLoaderMixin._modify_text_encoder(text_encoder, dtype=torch.float32, rank=args.rank)
|
888 |
-
|
889 |
-
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
|
890 |
-
def save_model_hook(models, weights, output_dir):
|
891 |
-
# there are only two options here. Either are just the unet attn processor layers
|
892 |
-
# or there are the unet and text encoder atten layers
|
893 |
-
unet_lora_layers_to_save = None
|
894 |
-
text_encoder_lora_layers_to_save = None
|
895 |
-
|
896 |
-
for model in models:
|
897 |
-
if isinstance(model, type(accelerator.unwrap_model(unet))):
|
898 |
-
unet_lora_layers_to_save = unet_attn_processors_state_dict(model)
|
899 |
-
elif isinstance(model, type(accelerator.unwrap_model(text_encoder))):
|
900 |
-
text_encoder_lora_layers_to_save = text_encoder_lora_state_dict(model)
|
901 |
-
else:
|
902 |
-
raise ValueError(f"unexpected save model: {model.__class__}")
|
903 |
-
|
904 |
-
# make sure to pop weight so that corresponding model is not saved again
|
905 |
-
weights.pop()
|
906 |
-
|
907 |
-
LoraLoaderMixin.save_lora_weights(
|
908 |
-
output_dir,
|
909 |
-
unet_lora_layers=unet_lora_layers_to_save,
|
910 |
-
text_encoder_lora_layers=text_encoder_lora_layers_to_save,
|
911 |
-
)
|
912 |
-
|
913 |
-
def load_model_hook(models, input_dir):
|
914 |
-
unet_ = None
|
915 |
-
text_encoder_ = None
|
916 |
-
|
917 |
-
while len(models) > 0:
|
918 |
-
model = models.pop()
|
919 |
-
|
920 |
-
if isinstance(model, type(accelerator.unwrap_model(unet))):
|
921 |
-
unet_ = model
|
922 |
-
elif isinstance(model, type(accelerator.unwrap_model(text_encoder))):
|
923 |
-
text_encoder_ = model
|
924 |
-
else:
|
925 |
-
raise ValueError(f"unexpected save model: {model.__class__}")
|
926 |
-
|
927 |
-
lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)
|
928 |
-
LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)
|
929 |
-
LoraLoaderMixin.load_lora_into_text_encoder(
|
930 |
-
lora_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_
|
931 |
-
)
|
932 |
-
|
933 |
-
accelerator.register_save_state_pre_hook(save_model_hook)
|
934 |
-
accelerator.register_load_state_pre_hook(load_model_hook)
|
935 |
-
|
936 |
-
# Enable TF32 for faster training on Ampere GPUs,
|
937 |
-
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
|
938 |
-
if args.allow_tf32:
|
939 |
-
torch.backends.cuda.matmul.allow_tf32 = True
|
940 |
-
|
941 |
-
if args.scale_lr:
|
942 |
-
args.learning_rate = (
|
943 |
-
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
|
944 |
-
)
|
945 |
-
|
946 |
-
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
|
947 |
-
if args.use_8bit_adam:
|
948 |
-
try:
|
949 |
-
import bitsandbytes as bnb
|
950 |
-
except ImportError:
|
951 |
-
raise ImportError(
|
952 |
-
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
|
953 |
-
)
|
954 |
-
|
955 |
-
optimizer_class = bnb.optim.AdamW8bit
|
956 |
-
else:
|
957 |
-
optimizer_class = torch.optim.AdamW
|
958 |
-
|
959 |
-
# Optimizer creation
|
960 |
-
params_to_optimize = (
|
961 |
-
itertools.chain(unet_lora_parameters, text_lora_parameters)
|
962 |
-
if args.train_text_encoder
|
963 |
-
else unet_lora_parameters
|
964 |
-
)
|
965 |
-
optimizer = optimizer_class(
|
966 |
-
params_to_optimize,
|
967 |
-
lr=args.learning_rate,
|
968 |
-
betas=(args.adam_beta1, args.adam_beta2),
|
969 |
-
weight_decay=args.adam_weight_decay,
|
970 |
-
eps=args.adam_epsilon,
|
971 |
-
)
|
972 |
-
|
973 |
-
if args.pre_compute_text_embeddings:
|
974 |
-
|
975 |
-
def compute_text_embeddings(prompt):
|
976 |
-
with torch.no_grad():
|
977 |
-
text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=args.tokenizer_max_length)
|
978 |
-
prompt_embeds = encode_prompt(
|
979 |
-
text_encoder,
|
980 |
-
text_inputs.input_ids,
|
981 |
-
text_inputs.attention_mask,
|
982 |
-
text_encoder_use_attention_mask=args.text_encoder_use_attention_mask,
|
983 |
-
)
|
984 |
-
|
985 |
-
return prompt_embeds
|
986 |
-
|
987 |
-
pre_computed_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
|
988 |
-
validation_prompt_negative_prompt_embeds = compute_text_embeddings("")
|
989 |
-
|
990 |
-
if args.validation_prompt is not None:
|
991 |
-
validation_prompt_encoder_hidden_states = compute_text_embeddings(args.validation_prompt)
|
992 |
-
else:
|
993 |
-
validation_prompt_encoder_hidden_states = None
|
994 |
-
|
995 |
-
if args.instance_prompt is not None:
|
996 |
-
pre_computed_instance_prompt_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
|
997 |
-
else:
|
998 |
-
pre_computed_instance_prompt_encoder_hidden_states = None
|
999 |
-
|
1000 |
-
text_encoder = None
|
1001 |
-
tokenizer = None
|
1002 |
-
|
1003 |
-
gc.collect()
|
1004 |
-
torch.cuda.empty_cache()
|
1005 |
-
else:
|
1006 |
-
pre_computed_encoder_hidden_states = None
|
1007 |
-
validation_prompt_encoder_hidden_states = None
|
1008 |
-
validation_prompt_negative_prompt_embeds = None
|
1009 |
-
pre_computed_instance_prompt_encoder_hidden_states = None
|
1010 |
-
|
1011 |
-
# Dataset and DataLoaders creation:
|
1012 |
-
train_dataset = DreamBoothDataset(
|
1013 |
-
instance_data_root=args.instance_data_dir,
|
1014 |
-
instance_prompt=args.instance_prompt,
|
1015 |
-
class_data_root=args.class_data_dir if args.with_prior_preservation else None,
|
1016 |
-
class_prompt=args.class_prompt,
|
1017 |
-
class_num=args.num_class_images,
|
1018 |
-
tokenizer=tokenizer,
|
1019 |
-
size=args.resolution,
|
1020 |
-
center_crop=args.center_crop,
|
1021 |
-
encoder_hidden_states=pre_computed_encoder_hidden_states,
|
1022 |
-
instance_prompt_encoder_hidden_states=pre_computed_instance_prompt_encoder_hidden_states,
|
1023 |
-
tokenizer_max_length=args.tokenizer_max_length,
|
1024 |
-
)
|
1025 |
-
|
1026 |
-
train_dataloader = torch.utils.data.DataLoader(
|
1027 |
-
train_dataset,
|
1028 |
-
batch_size=args.train_batch_size,
|
1029 |
-
shuffle=True,
|
1030 |
-
collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
|
1031 |
-
num_workers=args.dataloader_num_workers,
|
1032 |
-
)
|
1033 |
-
|
1034 |
-
# Scheduler and math around the number of training steps.
|
1035 |
-
overrode_max_train_steps = False
|
1036 |
-
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
1037 |
-
if args.max_train_steps is None:
|
1038 |
-
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
1039 |
-
overrode_max_train_steps = True
|
1040 |
-
|
1041 |
-
lr_scheduler = get_scheduler(
|
1042 |
-
args.lr_scheduler,
|
1043 |
-
optimizer=optimizer,
|
1044 |
-
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
|
1045 |
-
num_training_steps=args.max_train_steps * accelerator.num_processes,
|
1046 |
-
num_cycles=args.lr_num_cycles,
|
1047 |
-
power=args.lr_power,
|
1048 |
-
)
|
1049 |
-
|
1050 |
-
# Prepare everything with our `accelerator`.
|
1051 |
-
if args.train_text_encoder:
|
1052 |
-
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
1053 |
-
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
|
1054 |
-
)
|
1055 |
-
else:
|
1056 |
-
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
1057 |
-
unet, optimizer, train_dataloader, lr_scheduler
|
1058 |
-
)
|
1059 |
-
|
1060 |
-
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
|
1061 |
-
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
1062 |
-
if overrode_max_train_steps:
|
1063 |
-
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
1064 |
-
# Afterwards we recalculate our number of training epochs
|
1065 |
-
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
|
1066 |
-
|
1067 |
-
# We need to initialize the trackers we use, and also store our configuration.
|
1068 |
-
# The trackers initializes automatically on the main process.
|
1069 |
-
if accelerator.is_main_process:
|
1070 |
-
tracker_config = vars(args)
|
1071 |
-
tracker_config.pop("validation_images")
|
1072 |
-
accelerator.init_trackers("dreambooth-lora", config=tracker_config)
|
1073 |
-
|
1074 |
-
# Train!
|
1075 |
-
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
|
1076 |
-
|
1077 |
-
logger.info("***** Running training *****")
|
1078 |
-
logger.info(f" Num examples = {len(train_dataset)}")
|
1079 |
-
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
|
1080 |
-
logger.info(f" Num Epochs = {args.num_train_epochs}")
|
1081 |
-
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
|
1082 |
-
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
|
1083 |
-
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
|
1084 |
-
logger.info(f" Total optimization steps = {args.max_train_steps}")
|
1085 |
-
global_step = 0
|
1086 |
-
first_epoch = 0
|
1087 |
-
|
1088 |
-
# Potentially load in the weights and states from a previous save
|
1089 |
-
if args.resume_from_checkpoint:
|
1090 |
-
if args.resume_from_checkpoint != "latest":
|
1091 |
-
path = os.path.basename(args.resume_from_checkpoint)
|
1092 |
-
else:
|
1093 |
-
# Get the mos recent checkpoint
|
1094 |
-
dirs = os.listdir(args.output_dir)
|
1095 |
-
dirs = [d for d in dirs if d.startswith("checkpoint")]
|
1096 |
-
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
|
1097 |
-
path = dirs[-1] if len(dirs) > 0 else None
|
1098 |
-
|
1099 |
-
if path is None:
|
1100 |
-
accelerator.print(
|
1101 |
-
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
|
1102 |
-
)
|
1103 |
-
args.resume_from_checkpoint = None
|
1104 |
-
else:
|
1105 |
-
accelerator.print(f"Resuming from checkpoint {path}")
|
1106 |
-
accelerator.load_state(os.path.join(args.output_dir, path))
|
1107 |
-
global_step = int(path.split("-")[1])
|
1108 |
-
|
1109 |
-
resume_global_step = global_step * args.gradient_accumulation_steps
|
1110 |
-
first_epoch = global_step // num_update_steps_per_epoch
|
1111 |
-
resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
|
1112 |
-
|
1113 |
-
# Only show the progress bar once on each machine.
|
1114 |
-
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
|
1115 |
-
progress_bar.set_description("Steps")
|
1116 |
-
|
1117 |
-
for epoch in range(first_epoch, args.num_train_epochs):
|
1118 |
-
unet.train()
|
1119 |
-
if args.train_text_encoder:
|
1120 |
-
text_encoder.train()
|
1121 |
-
for step, batch in enumerate(train_dataloader):
|
1122 |
-
# Skip steps until we reach the resumed step
|
1123 |
-
if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
|
1124 |
-
if step % args.gradient_accumulation_steps == 0:
|
1125 |
-
progress_bar.update(1)
|
1126 |
-
continue
|
1127 |
-
|
1128 |
-
with accelerator.accumulate(unet):
|
1129 |
-
pixel_values = batch["pixel_values"].to(dtype=weight_dtype)
|
1130 |
-
|
1131 |
-
if vae is not None:
|
1132 |
-
# Convert images to latent space
|
1133 |
-
model_input = vae.encode(pixel_values).latent_dist.sample()
|
1134 |
-
model_input = model_input * vae.config.scaling_factor
|
1135 |
-
else:
|
1136 |
-
model_input = pixel_values
|
1137 |
-
|
1138 |
-
# Sample noise that we'll add to the latents
|
1139 |
-
noise = torch.randn_like(model_input)
|
1140 |
-
bsz, channels, height, width = model_input.shape
|
1141 |
-
# Sample a random timestep for each image
|
1142 |
-
timesteps = torch.randint(
|
1143 |
-
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
|
1144 |
-
)
|
1145 |
-
timesteps = timesteps.long()
|
1146 |
-
|
1147 |
-
# Add noise to the model input according to the noise magnitude at each timestep
|
1148 |
-
# (this is the forward diffusion process)
|
1149 |
-
noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
|
1150 |
-
|
1151 |
-
# Get the text embedding for conditioning
|
1152 |
-
if args.pre_compute_text_embeddings:
|
1153 |
-
encoder_hidden_states = batch["input_ids"]
|
1154 |
-
else:
|
1155 |
-
encoder_hidden_states = encode_prompt(
|
1156 |
-
text_encoder,
|
1157 |
-
batch["input_ids"],
|
1158 |
-
batch["attention_mask"],
|
1159 |
-
text_encoder_use_attention_mask=args.text_encoder_use_attention_mask,
|
1160 |
-
)
|
1161 |
-
|
1162 |
-
if accelerator.unwrap_model(unet).config.in_channels == channels * 2:
|
1163 |
-
noisy_model_input = torch.cat([noisy_model_input, noisy_model_input], dim=1)
|
1164 |
-
|
1165 |
-
if args.class_labels_conditioning == "timesteps":
|
1166 |
-
class_labels = timesteps
|
1167 |
-
else:
|
1168 |
-
class_labels = None
|
1169 |
-
|
1170 |
-
# Predict the noise residual
|
1171 |
-
model_pred = unet(
|
1172 |
-
noisy_model_input, timesteps, encoder_hidden_states, class_labels=class_labels
|
1173 |
-
).sample
|
1174 |
-
|
1175 |
-
# if model predicts variance, throw away the prediction. we will only train on the
|
1176 |
-
# simplified training objective. This means that all schedulers using the fine tuned
|
1177 |
-
# model must be configured to use one of the fixed variance variance types.
|
1178 |
-
if model_pred.shape[1] == 6:
|
1179 |
-
model_pred, _ = torch.chunk(model_pred, 2, dim=1)
|
1180 |
-
|
1181 |
-
# Get the target for loss depending on the prediction type
|
1182 |
-
if noise_scheduler.config.prediction_type == "epsilon":
|
1183 |
-
target = noise
|
1184 |
-
elif noise_scheduler.config.prediction_type == "v_prediction":
|
1185 |
-
target = noise_scheduler.get_velocity(model_input, noise, timesteps)
|
1186 |
-
else:
|
1187 |
-
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
|
1188 |
-
|
1189 |
-
if args.with_prior_preservation:
|
1190 |
-
# Chunk the noise and model_pred into two parts and compute the loss on each part separately.
|
1191 |
-
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
|
1192 |
-
target, target_prior = torch.chunk(target, 2, dim=0)
|
1193 |
-
|
1194 |
-
# Compute instance loss
|
1195 |
-
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
|
1196 |
-
|
1197 |
-
# Compute prior loss
|
1198 |
-
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
|
1199 |
-
|
1200 |
-
# Add the prior loss to the instance loss.
|
1201 |
-
loss = loss + args.prior_loss_weight * prior_loss
|
1202 |
-
else:
|
1203 |
-
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
|
1204 |
-
|
1205 |
-
accelerator.backward(loss)
|
1206 |
-
if accelerator.sync_gradients:
|
1207 |
-
params_to_clip = (
|
1208 |
-
itertools.chain(unet_lora_parameters, text_lora_parameters)
|
1209 |
-
if args.train_text_encoder
|
1210 |
-
else unet_lora_parameters
|
1211 |
-
)
|
1212 |
-
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
|
1213 |
-
optimizer.step()
|
1214 |
-
lr_scheduler.step()
|
1215 |
-
optimizer.zero_grad()
|
1216 |
-
|
1217 |
-
# Checks if the accelerator has performed an optimization step behind the scenes
|
1218 |
-
if accelerator.sync_gradients:
|
1219 |
-
progress_bar.update(1)
|
1220 |
-
global_step += 1
|
1221 |
-
|
1222 |
-
if accelerator.is_main_process:
|
1223 |
-
if global_step % args.checkpointing_steps == 0:
|
1224 |
-
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
|
1225 |
-
if args.checkpoints_total_limit is not None:
|
1226 |
-
checkpoints = os.listdir(args.output_dir)
|
1227 |
-
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
|
1228 |
-
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
|
1229 |
-
|
1230 |
-
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
|
1231 |
-
if len(checkpoints) >= args.checkpoints_total_limit:
|
1232 |
-
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
|
1233 |
-
removing_checkpoints = checkpoints[0:num_to_remove]
|
1234 |
-
|
1235 |
-
logger.info(
|
1236 |
-
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
|
1237 |
-
)
|
1238 |
-
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
|
1239 |
-
|
1240 |
-
for removing_checkpoint in removing_checkpoints:
|
1241 |
-
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
|
1242 |
-
shutil.rmtree(removing_checkpoint)
|
1243 |
-
|
1244 |
-
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
|
1245 |
-
accelerator.save_state(save_path)
|
1246 |
-
logger.info(f"Saved state to {save_path}")
|
1247 |
-
|
1248 |
-
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
|
1249 |
-
progress_bar.set_postfix(**logs)
|
1250 |
-
accelerator.log(logs, step=global_step)
|
1251 |
-
|
1252 |
-
if global_step >= args.max_train_steps:
|
1253 |
-
break
|
1254 |
-
|
1255 |
-
if accelerator.is_main_process:
|
1256 |
-
if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
|
1257 |
-
logger.info(
|
1258 |
-
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
|
1259 |
-
f" {args.validation_prompt}."
|
1260 |
-
)
|
1261 |
-
# create pipeline
|
1262 |
-
pipeline = DiffusionPipeline.from_pretrained(
|
1263 |
-
args.pretrained_model_name_or_path,
|
1264 |
-
unet=accelerator.unwrap_model(unet),
|
1265 |
-
text_encoder=None if args.pre_compute_text_embeddings else accelerator.unwrap_model(text_encoder),
|
1266 |
-
revision=args.revision,
|
1267 |
-
torch_dtype=weight_dtype,
|
1268 |
-
)
|
1269 |
-
|
1270 |
-
# We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
|
1271 |
-
scheduler_args = {}
|
1272 |
-
|
1273 |
-
if "variance_type" in pipeline.scheduler.config:
|
1274 |
-
variance_type = pipeline.scheduler.config.variance_type
|
1275 |
-
|
1276 |
-
if variance_type in ["learned", "learned_range"]:
|
1277 |
-
variance_type = "fixed_small"
|
1278 |
-
|
1279 |
-
scheduler_args["variance_type"] = variance_type
|
1280 |
-
|
1281 |
-
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
|
1282 |
-
pipeline.scheduler.config, **scheduler_args
|
1283 |
-
)
|
1284 |
-
|
1285 |
-
pipeline = pipeline.to(accelerator.device)
|
1286 |
-
pipeline.set_progress_bar_config(disable=True)
|
1287 |
-
|
1288 |
-
# run inference
|
1289 |
-
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
|
1290 |
-
if args.pre_compute_text_embeddings:
|
1291 |
-
pipeline_args = {
|
1292 |
-
"prompt_embeds": validation_prompt_encoder_hidden_states,
|
1293 |
-
"negative_prompt_embeds": validation_prompt_negative_prompt_embeds,
|
1294 |
-
}
|
1295 |
-
else:
|
1296 |
-
pipeline_args = {"prompt": args.validation_prompt}
|
1297 |
-
|
1298 |
-
if args.validation_images is None:
|
1299 |
-
images = []
|
1300 |
-
for _ in range(args.num_validation_images):
|
1301 |
-
with torch.cuda.amp.autocast():
|
1302 |
-
image = pipeline(**pipeline_args, generator=generator).images[0]
|
1303 |
-
images.append(image)
|
1304 |
-
else:
|
1305 |
-
images = []
|
1306 |
-
for image in args.validation_images:
|
1307 |
-
image = Image.open(image)
|
1308 |
-
with torch.cuda.amp.autocast():
|
1309 |
-
image = pipeline(**pipeline_args, image=image, generator=generator).images[0]
|
1310 |
-
images.append(image)
|
1311 |
-
|
1312 |
-
for tracker in accelerator.trackers:
|
1313 |
-
if tracker.name == "tensorboard":
|
1314 |
-
np_images = np.stack([np.asarray(img) for img in images])
|
1315 |
-
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
|
1316 |
-
if tracker.name == "wandb":
|
1317 |
-
tracker.log(
|
1318 |
-
{
|
1319 |
-
"validation": [
|
1320 |
-
wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
|
1321 |
-
for i, image in enumerate(images)
|
1322 |
-
]
|
1323 |
-
}
|
1324 |
-
)
|
1325 |
-
|
1326 |
-
del pipeline
|
1327 |
-
torch.cuda.empty_cache()
|
1328 |
-
|
1329 |
-
# Save the lora layers
|
1330 |
-
accelerator.wait_for_everyone()
|
1331 |
-
if accelerator.is_main_process:
|
1332 |
-
unet = accelerator.unwrap_model(unet)
|
1333 |
-
unet = unet.to(torch.float32)
|
1334 |
-
unet_lora_layers = unet_attn_processors_state_dict(unet)
|
1335 |
-
|
1336 |
-
if text_encoder is not None and args.train_text_encoder:
|
1337 |
-
text_encoder = accelerator.unwrap_model(text_encoder)
|
1338 |
-
text_encoder = text_encoder.to(torch.float32)
|
1339 |
-
text_encoder_lora_layers = text_encoder_lora_state_dict(text_encoder)
|
1340 |
-
else:
|
1341 |
-
text_encoder_lora_layers = None
|
1342 |
-
|
1343 |
-
LoraLoaderMixin.save_lora_weights(
|
1344 |
-
save_directory=args.output_dir,
|
1345 |
-
unet_lora_layers=unet_lora_layers,
|
1346 |
-
text_encoder_lora_layers=text_encoder_lora_layers,
|
1347 |
-
)
|
1348 |
-
|
1349 |
-
# Final inference
|
1350 |
-
# Load previous pipeline
|
1351 |
-
pipeline = DiffusionPipeline.from_pretrained(
|
1352 |
-
args.pretrained_model_name_or_path, revision=args.revision, torch_dtype=weight_dtype
|
1353 |
-
)
|
1354 |
-
|
1355 |
-
# We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
|
1356 |
-
scheduler_args = {}
|
1357 |
-
|
1358 |
-
if "variance_type" in pipeline.scheduler.config:
|
1359 |
-
variance_type = pipeline.scheduler.config.variance_type
|
1360 |
-
|
1361 |
-
if variance_type in ["learned", "learned_range"]:
|
1362 |
-
variance_type = "fixed_small"
|
1363 |
-
|
1364 |
-
scheduler_args["variance_type"] = variance_type
|
1365 |
-
|
1366 |
-
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
|
1367 |
-
|
1368 |
-
pipeline = pipeline.to(accelerator.device)
|
1369 |
-
|
1370 |
-
# load attention processors
|
1371 |
-
pipeline.load_lora_weights(args.output_dir, weight_name="pytorch_lora_weights.bin")
|
1372 |
-
|
1373 |
-
# run inference
|
1374 |
-
images = []
|
1375 |
-
if args.validation_prompt and args.num_validation_images > 0:
|
1376 |
-
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
|
1377 |
-
images = [
|
1378 |
-
pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
|
1379 |
-
for _ in range(args.num_validation_images)
|
1380 |
-
]
|
1381 |
-
|
1382 |
-
for tracker in accelerator.trackers:
|
1383 |
-
if tracker.name == "tensorboard":
|
1384 |
-
np_images = np.stack([np.asarray(img) for img in images])
|
1385 |
-
tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
|
1386 |
-
if tracker.name == "wandb":
|
1387 |
-
tracker.log(
|
1388 |
-
{
|
1389 |
-
"test": [
|
1390 |
-
wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
|
1391 |
-
for i, image in enumerate(images)
|
1392 |
-
]
|
1393 |
-
}
|
1394 |
-
)
|
1395 |
-
|
1396 |
-
if args.push_to_hub:
|
1397 |
-
save_model_card(
|
1398 |
-
repo_id,
|
1399 |
-
images=images,
|
1400 |
-
base_model=args.pretrained_model_name_or_path,
|
1401 |
-
train_text_encoder=args.train_text_encoder,
|
1402 |
-
prompt=args.instance_prompt,
|
1403 |
-
repo_folder=args.output_dir,
|
1404 |
-
pipeline=pipeline,
|
1405 |
-
)
|
1406 |
-
upload_folder(
|
1407 |
-
repo_id=repo_id,
|
1408 |
-
folder_path=args.output_dir,
|
1409 |
-
commit_message="End of training",
|
1410 |
-
ignore_patterns=["step_*", "epoch_*"],
|
1411 |
-
)
|
1412 |
-
|
1413 |
-
accelerator.end_training()
|
1414 |
-
|
1415 |
-
|
1416 |
-
if __name__ == "__main__":
|
1417 |
-
args = parse_args()
|
1418 |
-
main(args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='open-mmlab://detectron2/resnet101_caffe',
|
4 |
-
backbone=dict(depth=101))
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py'
|
2 |
-
# learning policy
|
3 |
-
lr_config = dict(step=[16, 22])
|
4 |
-
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/fpn_uniformer.py
DELETED
@@ -1,35 +0,0 @@
|
|
1 |
-
# model settings
|
2 |
-
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
3 |
-
model = dict(
|
4 |
-
type='EncoderDecoder',
|
5 |
-
backbone=dict(
|
6 |
-
type='UniFormer',
|
7 |
-
embed_dim=[64, 128, 320, 512],
|
8 |
-
layers=[3, 4, 8, 3],
|
9 |
-
head_dim=64,
|
10 |
-
mlp_ratio=4.,
|
11 |
-
qkv_bias=True,
|
12 |
-
drop_rate=0.,
|
13 |
-
attn_drop_rate=0.,
|
14 |
-
drop_path_rate=0.1),
|
15 |
-
neck=dict(
|
16 |
-
type='FPN',
|
17 |
-
in_channels=[64, 128, 320, 512],
|
18 |
-
out_channels=256,
|
19 |
-
num_outs=4),
|
20 |
-
decode_head=dict(
|
21 |
-
type='FPNHead',
|
22 |
-
in_channels=[256, 256, 256, 256],
|
23 |
-
in_index=[0, 1, 2, 3],
|
24 |
-
feature_strides=[4, 8, 16, 32],
|
25 |
-
channels=128,
|
26 |
-
dropout_ratio=0.1,
|
27 |
-
num_classes=150,
|
28 |
-
norm_cfg=norm_cfg,
|
29 |
-
align_corners=False,
|
30 |
-
loss_decode=dict(
|
31 |
-
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
32 |
-
# model training and testing settings
|
33 |
-
train_cfg=dict(),
|
34 |
-
test_cfg=dict(mode='whole')
|
35 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/one_click_installer_check.py
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
from pathlib import Path
|
2 |
-
from modules.logging_colors import logger
|
3 |
-
|
4 |
-
if Path('../webui.py').exists():
|
5 |
-
logger.warning('\nIt looks like you are running an outdated version of '
|
6 |
-
'the one-click-installers.\n'
|
7 |
-
'Please migrate your installation following the instructions here:\n'
|
8 |
-
'https://github.com/oobabooga/text-generation-webui/wiki/Migrating-an-old-one%E2%80%90click-install')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/diffusionmodules/upscaling.py
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import numpy as np
|
4 |
-
from functools import partial
|
5 |
-
|
6 |
-
from ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule
|
7 |
-
from ldm.util import default
|
8 |
-
|
9 |
-
|
10 |
-
class AbstractLowScaleModel(nn.Module):
|
11 |
-
# for concatenating a downsampled image to the latent representation
|
12 |
-
def __init__(self, noise_schedule_config=None):
|
13 |
-
super(AbstractLowScaleModel, self).__init__()
|
14 |
-
if noise_schedule_config is not None:
|
15 |
-
self.register_schedule(**noise_schedule_config)
|
16 |
-
|
17 |
-
def register_schedule(self, beta_schedule="linear", timesteps=1000,
|
18 |
-
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
|
19 |
-
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
|
20 |
-
cosine_s=cosine_s)
|
21 |
-
alphas = 1. - betas
|
22 |
-
alphas_cumprod = np.cumprod(alphas, axis=0)
|
23 |
-
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
|
24 |
-
|
25 |
-
timesteps, = betas.shape
|
26 |
-
self.num_timesteps = int(timesteps)
|
27 |
-
self.linear_start = linear_start
|
28 |
-
self.linear_end = linear_end
|
29 |
-
assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
|
30 |
-
|
31 |
-
to_torch = partial(torch.tensor, dtype=torch.float32)
|
32 |
-
|
33 |
-
self.register_buffer('betas', to_torch(betas))
|
34 |
-
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
|
35 |
-
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
|
36 |
-
|
37 |
-
# calculations for diffusion q(x_t | x_{t-1}) and others
|
38 |
-
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
|
39 |
-
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
|
40 |
-
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
|
41 |
-
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
|
42 |
-
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
|
43 |
-
|
44 |
-
def q_sample(self, x_start, t, noise=None):
|
45 |
-
noise = default(noise, lambda: torch.randn_like(x_start))
|
46 |
-
return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
|
47 |
-
extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
|
48 |
-
|
49 |
-
def forward(self, x):
|
50 |
-
return x, None
|
51 |
-
|
52 |
-
def decode(self, x):
|
53 |
-
return x
|
54 |
-
|
55 |
-
|
56 |
-
class SimpleImageConcat(AbstractLowScaleModel):
|
57 |
-
# no noise level conditioning
|
58 |
-
def __init__(self):
|
59 |
-
super(SimpleImageConcat, self).__init__(noise_schedule_config=None)
|
60 |
-
self.max_noise_level = 0
|
61 |
-
|
62 |
-
def forward(self, x):
|
63 |
-
# fix to constant noise level
|
64 |
-
return x, torch.zeros(x.shape[0], device=x.device).long()
|
65 |
-
|
66 |
-
|
67 |
-
class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel):
|
68 |
-
def __init__(self, noise_schedule_config, max_noise_level=1000, to_cuda=False):
|
69 |
-
super().__init__(noise_schedule_config=noise_schedule_config)
|
70 |
-
self.max_noise_level = max_noise_level
|
71 |
-
|
72 |
-
def forward(self, x, noise_level=None):
|
73 |
-
if noise_level is None:
|
74 |
-
noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
|
75 |
-
else:
|
76 |
-
assert isinstance(noise_level, torch.Tensor)
|
77 |
-
z = self.q_sample(x, noise_level)
|
78 |
-
return z, noise_level
|
79 |
-
|
80 |
-
|
81 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Arnx/MusicGenXvAKN/tests/quantization/test_vq.py
DELETED
@@ -1,18 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
import torch
|
8 |
-
|
9 |
-
from audiocraft.quantization.vq import ResidualVectorQuantizer
|
10 |
-
|
11 |
-
|
12 |
-
class TestResidualVectorQuantizer:
|
13 |
-
|
14 |
-
def test_rvq(self):
|
15 |
-
x = torch.randn(1, 16, 2048)
|
16 |
-
vq = ResidualVectorQuantizer(n_q=8, dimension=16, bins=8)
|
17 |
-
res = vq(x, 1.)
|
18 |
-
assert res.x.shape == torch.Size([1, 16, 2048])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/_structures.py
DELETED
@@ -1,61 +0,0 @@
|
|
1 |
-
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
-
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
-
# for complete details.
|
4 |
-
|
5 |
-
|
6 |
-
class InfinityType:
|
7 |
-
def __repr__(self) -> str:
|
8 |
-
return "Infinity"
|
9 |
-
|
10 |
-
def __hash__(self) -> int:
|
11 |
-
return hash(repr(self))
|
12 |
-
|
13 |
-
def __lt__(self, other: object) -> bool:
|
14 |
-
return False
|
15 |
-
|
16 |
-
def __le__(self, other: object) -> bool:
|
17 |
-
return False
|
18 |
-
|
19 |
-
def __eq__(self, other: object) -> bool:
|
20 |
-
return isinstance(other, self.__class__)
|
21 |
-
|
22 |
-
def __gt__(self, other: object) -> bool:
|
23 |
-
return True
|
24 |
-
|
25 |
-
def __ge__(self, other: object) -> bool:
|
26 |
-
return True
|
27 |
-
|
28 |
-
def __neg__(self: object) -> "NegativeInfinityType":
|
29 |
-
return NegativeInfinity
|
30 |
-
|
31 |
-
|
32 |
-
Infinity = InfinityType()
|
33 |
-
|
34 |
-
|
35 |
-
class NegativeInfinityType:
|
36 |
-
def __repr__(self) -> str:
|
37 |
-
return "-Infinity"
|
38 |
-
|
39 |
-
def __hash__(self) -> int:
|
40 |
-
return hash(repr(self))
|
41 |
-
|
42 |
-
def __lt__(self, other: object) -> bool:
|
43 |
-
return True
|
44 |
-
|
45 |
-
def __le__(self, other: object) -> bool:
|
46 |
-
return True
|
47 |
-
|
48 |
-
def __eq__(self, other: object) -> bool:
|
49 |
-
return isinstance(other, self.__class__)
|
50 |
-
|
51 |
-
def __gt__(self, other: object) -> bool:
|
52 |
-
return False
|
53 |
-
|
54 |
-
def __ge__(self, other: object) -> bool:
|
55 |
-
return False
|
56 |
-
|
57 |
-
def __neg__(self: object) -> InfinityType:
|
58 |
-
return Infinity
|
59 |
-
|
60 |
-
|
61 |
-
NegativeInfinity = NegativeInfinityType()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/adapters.py
DELETED
@@ -1,584 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
requests.adapters
|
3 |
-
~~~~~~~~~~~~~~~~~
|
4 |
-
|
5 |
-
This module contains the transport adapters that Requests uses to define
|
6 |
-
and maintain connections.
|
7 |
-
"""
|
8 |
-
|
9 |
-
import os.path
|
10 |
-
import socket # noqa: F401
|
11 |
-
|
12 |
-
from pip._vendor.urllib3.exceptions import ClosedPoolError, ConnectTimeoutError
|
13 |
-
from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError
|
14 |
-
from pip._vendor.urllib3.exceptions import InvalidHeader as _InvalidHeader
|
15 |
-
from pip._vendor.urllib3.exceptions import (
|
16 |
-
LocationValueError,
|
17 |
-
MaxRetryError,
|
18 |
-
NewConnectionError,
|
19 |
-
ProtocolError,
|
20 |
-
)
|
21 |
-
from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError
|
22 |
-
from pip._vendor.urllib3.exceptions import ReadTimeoutError, ResponseError
|
23 |
-
from pip._vendor.urllib3.exceptions import SSLError as _SSLError
|
24 |
-
from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
|
25 |
-
from pip._vendor.urllib3.response import HTTPResponse
|
26 |
-
from pip._vendor.urllib3.util import Timeout as TimeoutSauce
|
27 |
-
from pip._vendor.urllib3.util import parse_url
|
28 |
-
from pip._vendor.urllib3.util.retry import Retry
|
29 |
-
|
30 |
-
from .auth import _basic_auth_str
|
31 |
-
from .compat import basestring, urlparse
|
32 |
-
from .cookies import extract_cookies_to_jar
|
33 |
-
from .exceptions import (
|
34 |
-
ConnectionError,
|
35 |
-
ConnectTimeout,
|
36 |
-
InvalidHeader,
|
37 |
-
InvalidProxyURL,
|
38 |
-
InvalidSchema,
|
39 |
-
InvalidURL,
|
40 |
-
ProxyError,
|
41 |
-
ReadTimeout,
|
42 |
-
RetryError,
|
43 |
-
SSLError,
|
44 |
-
)
|
45 |
-
from .models import Response
|
46 |
-
from .structures import CaseInsensitiveDict
|
47 |
-
from .utils import (
|
48 |
-
DEFAULT_CA_BUNDLE_PATH,
|
49 |
-
extract_zipped_paths,
|
50 |
-
get_auth_from_url,
|
51 |
-
get_encoding_from_headers,
|
52 |
-
prepend_scheme_if_needed,
|
53 |
-
select_proxy,
|
54 |
-
urldefragauth,
|
55 |
-
)
|
56 |
-
|
57 |
-
try:
|
58 |
-
from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager
|
59 |
-
except ImportError:
|
60 |
-
|
61 |
-
def SOCKSProxyManager(*args, **kwargs):
|
62 |
-
raise InvalidSchema("Missing dependencies for SOCKS support.")
|
63 |
-
|
64 |
-
|
65 |
-
DEFAULT_POOLBLOCK = False
|
66 |
-
DEFAULT_POOLSIZE = 10
|
67 |
-
DEFAULT_RETRIES = 0
|
68 |
-
DEFAULT_POOL_TIMEOUT = None
|
69 |
-
|
70 |
-
|
71 |
-
class BaseAdapter:
|
72 |
-
"""The Base Transport Adapter"""
|
73 |
-
|
74 |
-
def __init__(self):
|
75 |
-
super().__init__()
|
76 |
-
|
77 |
-
def send(
|
78 |
-
self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
|
79 |
-
):
|
80 |
-
"""Sends PreparedRequest object. Returns Response object.
|
81 |
-
|
82 |
-
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
|
83 |
-
:param stream: (optional) Whether to stream the request content.
|
84 |
-
:param timeout: (optional) How long to wait for the server to send
|
85 |
-
data before giving up, as a float, or a :ref:`(connect timeout,
|
86 |
-
read timeout) <timeouts>` tuple.
|
87 |
-
:type timeout: float or tuple
|
88 |
-
:param verify: (optional) Either a boolean, in which case it controls whether we verify
|
89 |
-
the server's TLS certificate, or a string, in which case it must be a path
|
90 |
-
to a CA bundle to use
|
91 |
-
:param cert: (optional) Any user-provided SSL certificate to be trusted.
|
92 |
-
:param proxies: (optional) The proxies dictionary to apply to the request.
|
93 |
-
"""
|
94 |
-
raise NotImplementedError
|
95 |
-
|
96 |
-
def close(self):
|
97 |
-
"""Cleans up adapter specific items."""
|
98 |
-
raise NotImplementedError
|
99 |
-
|
100 |
-
|
101 |
-
class HTTPAdapter(BaseAdapter):
|
102 |
-
"""The built-in HTTP Adapter for urllib3.
|
103 |
-
|
104 |
-
Provides a general-case interface for Requests sessions to contact HTTP and
|
105 |
-
HTTPS urls by implementing the Transport Adapter interface. This class will
|
106 |
-
usually be created by the :class:`Session <Session>` class under the
|
107 |
-
covers.
|
108 |
-
|
109 |
-
:param pool_connections: The number of urllib3 connection pools to cache.
|
110 |
-
:param pool_maxsize: The maximum number of connections to save in the pool.
|
111 |
-
:param max_retries: The maximum number of retries each connection
|
112 |
-
should attempt. Note, this applies only to failed DNS lookups, socket
|
113 |
-
connections and connection timeouts, never to requests where data has
|
114 |
-
made it to the server. By default, Requests does not retry failed
|
115 |
-
connections. If you need granular control over the conditions under
|
116 |
-
which we retry a request, import urllib3's ``Retry`` class and pass
|
117 |
-
that instead.
|
118 |
-
:param pool_block: Whether the connection pool should block for connections.
|
119 |
-
|
120 |
-
Usage::
|
121 |
-
|
122 |
-
>>> import requests
|
123 |
-
>>> s = requests.Session()
|
124 |
-
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
|
125 |
-
>>> s.mount('http://', a)
|
126 |
-
"""
|
127 |
-
|
128 |
-
__attrs__ = [
|
129 |
-
"max_retries",
|
130 |
-
"config",
|
131 |
-
"_pool_connections",
|
132 |
-
"_pool_maxsize",
|
133 |
-
"_pool_block",
|
134 |
-
]
|
135 |
-
|
136 |
-
def __init__(
|
137 |
-
self,
|
138 |
-
pool_connections=DEFAULT_POOLSIZE,
|
139 |
-
pool_maxsize=DEFAULT_POOLSIZE,
|
140 |
-
max_retries=DEFAULT_RETRIES,
|
141 |
-
pool_block=DEFAULT_POOLBLOCK,
|
142 |
-
):
|
143 |
-
if max_retries == DEFAULT_RETRIES:
|
144 |
-
self.max_retries = Retry(0, read=False)
|
145 |
-
else:
|
146 |
-
self.max_retries = Retry.from_int(max_retries)
|
147 |
-
self.config = {}
|
148 |
-
self.proxy_manager = {}
|
149 |
-
|
150 |
-
super().__init__()
|
151 |
-
|
152 |
-
self._pool_connections = pool_connections
|
153 |
-
self._pool_maxsize = pool_maxsize
|
154 |
-
self._pool_block = pool_block
|
155 |
-
|
156 |
-
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
|
157 |
-
|
158 |
-
def __getstate__(self):
|
159 |
-
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
|
160 |
-
|
161 |
-
def __setstate__(self, state):
|
162 |
-
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
|
163 |
-
# self.poolmanager uses a lambda function, which isn't pickleable.
|
164 |
-
self.proxy_manager = {}
|
165 |
-
self.config = {}
|
166 |
-
|
167 |
-
for attr, value in state.items():
|
168 |
-
setattr(self, attr, value)
|
169 |
-
|
170 |
-
self.init_poolmanager(
|
171 |
-
self._pool_connections, self._pool_maxsize, block=self._pool_block
|
172 |
-
)
|
173 |
-
|
174 |
-
def init_poolmanager(
|
175 |
-
self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs
|
176 |
-
):
|
177 |
-
"""Initializes a urllib3 PoolManager.
|
178 |
-
|
179 |
-
This method should not be called from user code, and is only
|
180 |
-
exposed for use when subclassing the
|
181 |
-
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
|
182 |
-
|
183 |
-
:param connections: The number of urllib3 connection pools to cache.
|
184 |
-
:param maxsize: The maximum number of connections to save in the pool.
|
185 |
-
:param block: Block when no free connections are available.
|
186 |
-
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
|
187 |
-
"""
|
188 |
-
# save these values for pickling
|
189 |
-
self._pool_connections = connections
|
190 |
-
self._pool_maxsize = maxsize
|
191 |
-
self._pool_block = block
|
192 |
-
|
193 |
-
self.poolmanager = PoolManager(
|
194 |
-
num_pools=connections,
|
195 |
-
maxsize=maxsize,
|
196 |
-
block=block,
|
197 |
-
strict=True,
|
198 |
-
**pool_kwargs,
|
199 |
-
)
|
200 |
-
|
201 |
-
def proxy_manager_for(self, proxy, **proxy_kwargs):
|
202 |
-
"""Return urllib3 ProxyManager for the given proxy.
|
203 |
-
|
204 |
-
This method should not be called from user code, and is only
|
205 |
-
exposed for use when subclassing the
|
206 |
-
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
|
207 |
-
|
208 |
-
:param proxy: The proxy to return a urllib3 ProxyManager for.
|
209 |
-
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
|
210 |
-
:returns: ProxyManager
|
211 |
-
:rtype: urllib3.ProxyManager
|
212 |
-
"""
|
213 |
-
if proxy in self.proxy_manager:
|
214 |
-
manager = self.proxy_manager[proxy]
|
215 |
-
elif proxy.lower().startswith("socks"):
|
216 |
-
username, password = get_auth_from_url(proxy)
|
217 |
-
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
|
218 |
-
proxy,
|
219 |
-
username=username,
|
220 |
-
password=password,
|
221 |
-
num_pools=self._pool_connections,
|
222 |
-
maxsize=self._pool_maxsize,
|
223 |
-
block=self._pool_block,
|
224 |
-
**proxy_kwargs,
|
225 |
-
)
|
226 |
-
else:
|
227 |
-
proxy_headers = self.proxy_headers(proxy)
|
228 |
-
manager = self.proxy_manager[proxy] = proxy_from_url(
|
229 |
-
proxy,
|
230 |
-
proxy_headers=proxy_headers,
|
231 |
-
num_pools=self._pool_connections,
|
232 |
-
maxsize=self._pool_maxsize,
|
233 |
-
block=self._pool_block,
|
234 |
-
**proxy_kwargs,
|
235 |
-
)
|
236 |
-
|
237 |
-
return manager
|
238 |
-
|
239 |
-
def cert_verify(self, conn, url, verify, cert):
|
240 |
-
"""Verify a SSL certificate. This method should not be called from user
|
241 |
-
code, and is only exposed for use when subclassing the
|
242 |
-
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
|
243 |
-
|
244 |
-
:param conn: The urllib3 connection object associated with the cert.
|
245 |
-
:param url: The requested URL.
|
246 |
-
:param verify: Either a boolean, in which case it controls whether we verify
|
247 |
-
the server's TLS certificate, or a string, in which case it must be a path
|
248 |
-
to a CA bundle to use
|
249 |
-
:param cert: The SSL certificate to verify.
|
250 |
-
"""
|
251 |
-
if url.lower().startswith("https") and verify:
|
252 |
-
|
253 |
-
cert_loc = None
|
254 |
-
|
255 |
-
# Allow self-specified cert location.
|
256 |
-
if verify is not True:
|
257 |
-
cert_loc = verify
|
258 |
-
|
259 |
-
if not cert_loc:
|
260 |
-
cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
|
261 |
-
|
262 |
-
if not cert_loc or not os.path.exists(cert_loc):
|
263 |
-
raise OSError(
|
264 |
-
f"Could not find a suitable TLS CA certificate bundle, "
|
265 |
-
f"invalid path: {cert_loc}"
|
266 |
-
)
|
267 |
-
|
268 |
-
conn.cert_reqs = "CERT_REQUIRED"
|
269 |
-
|
270 |
-
if not os.path.isdir(cert_loc):
|
271 |
-
conn.ca_certs = cert_loc
|
272 |
-
else:
|
273 |
-
conn.ca_cert_dir = cert_loc
|
274 |
-
else:
|
275 |
-
conn.cert_reqs = "CERT_NONE"
|
276 |
-
conn.ca_certs = None
|
277 |
-
conn.ca_cert_dir = None
|
278 |
-
|
279 |
-
if cert:
|
280 |
-
if not isinstance(cert, basestring):
|
281 |
-
conn.cert_file = cert[0]
|
282 |
-
conn.key_file = cert[1]
|
283 |
-
else:
|
284 |
-
conn.cert_file = cert
|
285 |
-
conn.key_file = None
|
286 |
-
if conn.cert_file and not os.path.exists(conn.cert_file):
|
287 |
-
raise OSError(
|
288 |
-
f"Could not find the TLS certificate file, "
|
289 |
-
f"invalid path: {conn.cert_file}"
|
290 |
-
)
|
291 |
-
if conn.key_file and not os.path.exists(conn.key_file):
|
292 |
-
raise OSError(
|
293 |
-
f"Could not find the TLS key file, invalid path: {conn.key_file}"
|
294 |
-
)
|
295 |
-
|
296 |
-
def build_response(self, req, resp):
|
297 |
-
"""Builds a :class:`Response <requests.Response>` object from a urllib3
|
298 |
-
response. This should not be called from user code, and is only exposed
|
299 |
-
for use when subclassing the
|
300 |
-
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
|
301 |
-
|
302 |
-
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
|
303 |
-
:param resp: The urllib3 response object.
|
304 |
-
:rtype: requests.Response
|
305 |
-
"""
|
306 |
-
response = Response()
|
307 |
-
|
308 |
-
# Fallback to None if there's no status_code, for whatever reason.
|
309 |
-
response.status_code = getattr(resp, "status", None)
|
310 |
-
|
311 |
-
# Make headers case-insensitive.
|
312 |
-
response.headers = CaseInsensitiveDict(getattr(resp, "headers", {}))
|
313 |
-
|
314 |
-
# Set encoding.
|
315 |
-
response.encoding = get_encoding_from_headers(response.headers)
|
316 |
-
response.raw = resp
|
317 |
-
response.reason = response.raw.reason
|
318 |
-
|
319 |
-
if isinstance(req.url, bytes):
|
320 |
-
response.url = req.url.decode("utf-8")
|
321 |
-
else:
|
322 |
-
response.url = req.url
|
323 |
-
|
324 |
-
# Add new cookies from the server.
|
325 |
-
extract_cookies_to_jar(response.cookies, req, resp)
|
326 |
-
|
327 |
-
# Give the Response some context.
|
328 |
-
response.request = req
|
329 |
-
response.connection = self
|
330 |
-
|
331 |
-
return response
|
332 |
-
|
333 |
-
def get_connection(self, url, proxies=None):
|
334 |
-
"""Returns a urllib3 connection for the given URL. This should not be
|
335 |
-
called from user code, and is only exposed for use when subclassing the
|
336 |
-
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
|
337 |
-
|
338 |
-
:param url: The URL to connect to.
|
339 |
-
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
|
340 |
-
:rtype: urllib3.ConnectionPool
|
341 |
-
"""
|
342 |
-
proxy = select_proxy(url, proxies)
|
343 |
-
|
344 |
-
if proxy:
|
345 |
-
proxy = prepend_scheme_if_needed(proxy, "http")
|
346 |
-
proxy_url = parse_url(proxy)
|
347 |
-
if not proxy_url.host:
|
348 |
-
raise InvalidProxyURL(
|
349 |
-
"Please check proxy URL. It is malformed "
|
350 |
-
"and could be missing the host."
|
351 |
-
)
|
352 |
-
proxy_manager = self.proxy_manager_for(proxy)
|
353 |
-
conn = proxy_manager.connection_from_url(url)
|
354 |
-
else:
|
355 |
-
# Only scheme should be lower case
|
356 |
-
parsed = urlparse(url)
|
357 |
-
url = parsed.geturl()
|
358 |
-
conn = self.poolmanager.connection_from_url(url)
|
359 |
-
|
360 |
-
return conn
|
361 |
-
|
362 |
-
def close(self):
|
363 |
-
"""Disposes of any internal state.
|
364 |
-
|
365 |
-
Currently, this closes the PoolManager and any active ProxyManager,
|
366 |
-
which closes any pooled connections.
|
367 |
-
"""
|
368 |
-
self.poolmanager.clear()
|
369 |
-
for proxy in self.proxy_manager.values():
|
370 |
-
proxy.clear()
|
371 |
-
|
372 |
-
def request_url(self, request, proxies):
|
373 |
-
"""Obtain the url to use when making the final request.
|
374 |
-
|
375 |
-
If the message is being sent through a HTTP proxy, the full URL has to
|
376 |
-
be used. Otherwise, we should only use the path portion of the URL.
|
377 |
-
|
378 |
-
This should not be called from user code, and is only exposed for use
|
379 |
-
when subclassing the
|
380 |
-
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
|
381 |
-
|
382 |
-
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
|
383 |
-
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
|
384 |
-
:rtype: str
|
385 |
-
"""
|
386 |
-
proxy = select_proxy(request.url, proxies)
|
387 |
-
scheme = urlparse(request.url).scheme
|
388 |
-
|
389 |
-
is_proxied_http_request = proxy and scheme != "https"
|
390 |
-
using_socks_proxy = False
|
391 |
-
if proxy:
|
392 |
-
proxy_scheme = urlparse(proxy).scheme.lower()
|
393 |
-
using_socks_proxy = proxy_scheme.startswith("socks")
|
394 |
-
|
395 |
-
url = request.path_url
|
396 |
-
if is_proxied_http_request and not using_socks_proxy:
|
397 |
-
url = urldefragauth(request.url)
|
398 |
-
|
399 |
-
return url
|
400 |
-
|
401 |
-
def add_headers(self, request, **kwargs):
|
402 |
-
"""Add any headers needed by the connection. As of v2.0 this does
|
403 |
-
nothing by default, but is left for overriding by users that subclass
|
404 |
-
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
|
405 |
-
|
406 |
-
This should not be called from user code, and is only exposed for use
|
407 |
-
when subclassing the
|
408 |
-
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
|
409 |
-
|
410 |
-
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
|
411 |
-
:param kwargs: The keyword arguments from the call to send().
|
412 |
-
"""
|
413 |
-
pass
|
414 |
-
|
415 |
-
def proxy_headers(self, proxy):
|
416 |
-
"""Returns a dictionary of the headers to add to any request sent
|
417 |
-
through a proxy. This works with urllib3 magic to ensure that they are
|
418 |
-
correctly sent to the proxy, rather than in a tunnelled request if
|
419 |
-
CONNECT is being used.
|
420 |
-
|
421 |
-
This should not be called from user code, and is only exposed for use
|
422 |
-
when subclassing the
|
423 |
-
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
|
424 |
-
|
425 |
-
:param proxy: The url of the proxy being used for this request.
|
426 |
-
:rtype: dict
|
427 |
-
"""
|
428 |
-
headers = {}
|
429 |
-
username, password = get_auth_from_url(proxy)
|
430 |
-
|
431 |
-
if username:
|
432 |
-
headers["Proxy-Authorization"] = _basic_auth_str(username, password)
|
433 |
-
|
434 |
-
return headers
|
435 |
-
|
436 |
-
def send(
|
437 |
-
self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
|
438 |
-
):
|
439 |
-
"""Sends PreparedRequest object. Returns Response object.
|
440 |
-
|
441 |
-
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
|
442 |
-
:param stream: (optional) Whether to stream the request content.
|
443 |
-
:param timeout: (optional) How long to wait for the server to send
|
444 |
-
data before giving up, as a float, or a :ref:`(connect timeout,
|
445 |
-
read timeout) <timeouts>` tuple.
|
446 |
-
:type timeout: float or tuple or urllib3 Timeout object
|
447 |
-
:param verify: (optional) Either a boolean, in which case it controls whether
|
448 |
-
we verify the server's TLS certificate, or a string, in which case it
|
449 |
-
must be a path to a CA bundle to use
|
450 |
-
:param cert: (optional) Any user-provided SSL certificate to be trusted.
|
451 |
-
:param proxies: (optional) The proxies dictionary to apply to the request.
|
452 |
-
:rtype: requests.Response
|
453 |
-
"""
|
454 |
-
|
455 |
-
try:
|
456 |
-
conn = self.get_connection(request.url, proxies)
|
457 |
-
except LocationValueError as e:
|
458 |
-
raise InvalidURL(e, request=request)
|
459 |
-
|
460 |
-
self.cert_verify(conn, request.url, verify, cert)
|
461 |
-
url = self.request_url(request, proxies)
|
462 |
-
self.add_headers(
|
463 |
-
request,
|
464 |
-
stream=stream,
|
465 |
-
timeout=timeout,
|
466 |
-
verify=verify,
|
467 |
-
cert=cert,
|
468 |
-
proxies=proxies,
|
469 |
-
)
|
470 |
-
|
471 |
-
chunked = not (request.body is None or "Content-Length" in request.headers)
|
472 |
-
|
473 |
-
if isinstance(timeout, tuple):
|
474 |
-
try:
|
475 |
-
connect, read = timeout
|
476 |
-
timeout = TimeoutSauce(connect=connect, read=read)
|
477 |
-
except ValueError:
|
478 |
-
raise ValueError(
|
479 |
-
f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, "
|
480 |
-
f"or a single float to set both timeouts to the same value."
|
481 |
-
)
|
482 |
-
elif isinstance(timeout, TimeoutSauce):
|
483 |
-
pass
|
484 |
-
else:
|
485 |
-
timeout = TimeoutSauce(connect=timeout, read=timeout)
|
486 |
-
|
487 |
-
try:
|
488 |
-
if not chunked:
|
489 |
-
resp = conn.urlopen(
|
490 |
-
method=request.method,
|
491 |
-
url=url,
|
492 |
-
body=request.body,
|
493 |
-
headers=request.headers,
|
494 |
-
redirect=False,
|
495 |
-
assert_same_host=False,
|
496 |
-
preload_content=False,
|
497 |
-
decode_content=False,
|
498 |
-
retries=self.max_retries,
|
499 |
-
timeout=timeout,
|
500 |
-
)
|
501 |
-
|
502 |
-
# Send the request.
|
503 |
-
else:
|
504 |
-
if hasattr(conn, "proxy_pool"):
|
505 |
-
conn = conn.proxy_pool
|
506 |
-
|
507 |
-
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
|
508 |
-
|
509 |
-
try:
|
510 |
-
skip_host = "Host" in request.headers
|
511 |
-
low_conn.putrequest(
|
512 |
-
request.method,
|
513 |
-
url,
|
514 |
-
skip_accept_encoding=True,
|
515 |
-
skip_host=skip_host,
|
516 |
-
)
|
517 |
-
|
518 |
-
for header, value in request.headers.items():
|
519 |
-
low_conn.putheader(header, value)
|
520 |
-
|
521 |
-
low_conn.endheaders()
|
522 |
-
|
523 |
-
for i in request.body:
|
524 |
-
low_conn.send(hex(len(i))[2:].encode("utf-8"))
|
525 |
-
low_conn.send(b"\r\n")
|
526 |
-
low_conn.send(i)
|
527 |
-
low_conn.send(b"\r\n")
|
528 |
-
low_conn.send(b"0\r\n\r\n")
|
529 |
-
|
530 |
-
# Receive the response from the server
|
531 |
-
r = low_conn.getresponse()
|
532 |
-
|
533 |
-
resp = HTTPResponse.from_httplib(
|
534 |
-
r,
|
535 |
-
pool=conn,
|
536 |
-
connection=low_conn,
|
537 |
-
preload_content=False,
|
538 |
-
decode_content=False,
|
539 |
-
)
|
540 |
-
except Exception:
|
541 |
-
# If we hit any problems here, clean up the connection.
|
542 |
-
# Then, raise so that we can handle the actual exception.
|
543 |
-
low_conn.close()
|
544 |
-
raise
|
545 |
-
|
546 |
-
except (ProtocolError, OSError) as err:
|
547 |
-
raise ConnectionError(err, request=request)
|
548 |
-
|
549 |
-
except MaxRetryError as e:
|
550 |
-
if isinstance(e.reason, ConnectTimeoutError):
|
551 |
-
# TODO: Remove this in 3.0.0: see #2811
|
552 |
-
if not isinstance(e.reason, NewConnectionError):
|
553 |
-
raise ConnectTimeout(e, request=request)
|
554 |
-
|
555 |
-
if isinstance(e.reason, ResponseError):
|
556 |
-
raise RetryError(e, request=request)
|
557 |
-
|
558 |
-
if isinstance(e.reason, _ProxyError):
|
559 |
-
raise ProxyError(e, request=request)
|
560 |
-
|
561 |
-
if isinstance(e.reason, _SSLError):
|
562 |
-
# This branch is for urllib3 v1.22 and later.
|
563 |
-
raise SSLError(e, request=request)
|
564 |
-
|
565 |
-
raise ConnectionError(e, request=request)
|
566 |
-
|
567 |
-
except ClosedPoolError as e:
|
568 |
-
raise ConnectionError(e, request=request)
|
569 |
-
|
570 |
-
except _ProxyError as e:
|
571 |
-
raise ProxyError(e)
|
572 |
-
|
573 |
-
except (_SSLError, _HTTPError) as e:
|
574 |
-
if isinstance(e, _SSLError):
|
575 |
-
# This branch is for urllib3 versions earlier than v1.22
|
576 |
-
raise SSLError(e, request=request)
|
577 |
-
elif isinstance(e, ReadTimeoutError):
|
578 |
-
raise ReadTimeout(e, request=request)
|
579 |
-
elif isinstance(e, _InvalidHeader):
|
580 |
-
raise InvalidHeader(e, request=request)
|
581 |
-
else:
|
582 |
-
raise
|
583 |
-
|
584 |
-
return self.build_response(request, resp)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/msvc9compiler.py
DELETED
@@ -1,832 +0,0 @@
|
|
1 |
-
"""distutils.msvc9compiler
|
2 |
-
|
3 |
-
Contains MSVCCompiler, an implementation of the abstract CCompiler class
|
4 |
-
for the Microsoft Visual Studio 2008.
|
5 |
-
|
6 |
-
The module is compatible with VS 2005 and VS 2008. You can find legacy support
|
7 |
-
for older versions of VS in distutils.msvccompiler.
|
8 |
-
"""
|
9 |
-
|
10 |
-
# Written by Perry Stoll
|
11 |
-
# hacked by Robin Becker and Thomas Heller to do a better job of
|
12 |
-
# finding DevStudio (through the registry)
|
13 |
-
# ported to VS2005 and VS 2008 by Christian Heimes
|
14 |
-
|
15 |
-
import os
|
16 |
-
import subprocess
|
17 |
-
import sys
|
18 |
-
import re
|
19 |
-
import warnings
|
20 |
-
|
21 |
-
from distutils.errors import (
|
22 |
-
DistutilsExecError,
|
23 |
-
DistutilsPlatformError,
|
24 |
-
CompileError,
|
25 |
-
LibError,
|
26 |
-
LinkError,
|
27 |
-
)
|
28 |
-
from distutils.ccompiler import CCompiler, gen_lib_options
|
29 |
-
from distutils import log
|
30 |
-
from distutils.util import get_platform
|
31 |
-
|
32 |
-
import winreg
|
33 |
-
|
34 |
-
warnings.warn(
|
35 |
-
"msvc9compiler is deprecated and slated to be removed "
|
36 |
-
"in the future. Please discontinue use or file an issue "
|
37 |
-
"with pypa/distutils describing your use case.",
|
38 |
-
DeprecationWarning,
|
39 |
-
)
|
40 |
-
|
41 |
-
RegOpenKeyEx = winreg.OpenKeyEx
|
42 |
-
RegEnumKey = winreg.EnumKey
|
43 |
-
RegEnumValue = winreg.EnumValue
|
44 |
-
RegError = winreg.error
|
45 |
-
|
46 |
-
HKEYS = (
|
47 |
-
winreg.HKEY_USERS,
|
48 |
-
winreg.HKEY_CURRENT_USER,
|
49 |
-
winreg.HKEY_LOCAL_MACHINE,
|
50 |
-
winreg.HKEY_CLASSES_ROOT,
|
51 |
-
)
|
52 |
-
|
53 |
-
NATIVE_WIN64 = sys.platform == 'win32' and sys.maxsize > 2**32
|
54 |
-
if NATIVE_WIN64:
|
55 |
-
# Visual C++ is a 32-bit application, so we need to look in
|
56 |
-
# the corresponding registry branch, if we're running a
|
57 |
-
# 64-bit Python on Win64
|
58 |
-
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
|
59 |
-
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
|
60 |
-
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
|
61 |
-
else:
|
62 |
-
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
|
63 |
-
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
|
64 |
-
NET_BASE = r"Software\Microsoft\.NETFramework"
|
65 |
-
|
66 |
-
# A map keyed by get_platform() return values to values accepted by
|
67 |
-
# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is
|
68 |
-
# the param to cross-compile on x86 targeting amd64.)
|
69 |
-
PLAT_TO_VCVARS = {
|
70 |
-
'win32': 'x86',
|
71 |
-
'win-amd64': 'amd64',
|
72 |
-
}
|
73 |
-
|
74 |
-
|
75 |
-
class Reg:
|
76 |
-
"""Helper class to read values from the registry"""
|
77 |
-
|
78 |
-
def get_value(cls, path, key):
|
79 |
-
for base in HKEYS:
|
80 |
-
d = cls.read_values(base, path)
|
81 |
-
if d and key in d:
|
82 |
-
return d[key]
|
83 |
-
raise KeyError(key)
|
84 |
-
|
85 |
-
get_value = classmethod(get_value)
|
86 |
-
|
87 |
-
def read_keys(cls, base, key):
|
88 |
-
"""Return list of registry keys."""
|
89 |
-
try:
|
90 |
-
handle = RegOpenKeyEx(base, key)
|
91 |
-
except RegError:
|
92 |
-
return None
|
93 |
-
L = []
|
94 |
-
i = 0
|
95 |
-
while True:
|
96 |
-
try:
|
97 |
-
k = RegEnumKey(handle, i)
|
98 |
-
except RegError:
|
99 |
-
break
|
100 |
-
L.append(k)
|
101 |
-
i += 1
|
102 |
-
return L
|
103 |
-
|
104 |
-
read_keys = classmethod(read_keys)
|
105 |
-
|
106 |
-
def read_values(cls, base, key):
|
107 |
-
"""Return dict of registry keys and values.
|
108 |
-
|
109 |
-
All names are converted to lowercase.
|
110 |
-
"""
|
111 |
-
try:
|
112 |
-
handle = RegOpenKeyEx(base, key)
|
113 |
-
except RegError:
|
114 |
-
return None
|
115 |
-
d = {}
|
116 |
-
i = 0
|
117 |
-
while True:
|
118 |
-
try:
|
119 |
-
name, value, type = RegEnumValue(handle, i)
|
120 |
-
except RegError:
|
121 |
-
break
|
122 |
-
name = name.lower()
|
123 |
-
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
|
124 |
-
i += 1
|
125 |
-
return d
|
126 |
-
|
127 |
-
read_values = classmethod(read_values)
|
128 |
-
|
129 |
-
def convert_mbcs(s):
|
130 |
-
dec = getattr(s, "decode", None)
|
131 |
-
if dec is not None:
|
132 |
-
try:
|
133 |
-
s = dec("mbcs")
|
134 |
-
except UnicodeError:
|
135 |
-
pass
|
136 |
-
return s
|
137 |
-
|
138 |
-
convert_mbcs = staticmethod(convert_mbcs)
|
139 |
-
|
140 |
-
|
141 |
-
class MacroExpander:
|
142 |
-
def __init__(self, version):
|
143 |
-
self.macros = {}
|
144 |
-
self.vsbase = VS_BASE % version
|
145 |
-
self.load_macros(version)
|
146 |
-
|
147 |
-
def set_macro(self, macro, path, key):
|
148 |
-
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
|
149 |
-
|
150 |
-
def load_macros(self, version):
|
151 |
-
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
|
152 |
-
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
|
153 |
-
self.set_macro("FrameworkDir", NET_BASE, "installroot")
|
154 |
-
try:
|
155 |
-
if version >= 8.0:
|
156 |
-
self.set_macro("FrameworkSDKDir", NET_BASE, "sdkinstallrootv2.0")
|
157 |
-
else:
|
158 |
-
raise KeyError("sdkinstallrootv2.0")
|
159 |
-
except KeyError:
|
160 |
-
raise DistutilsPlatformError(
|
161 |
-
"""Python was built with Visual Studio 2008;
|
162 |
-
extensions must be built with a compiler than can generate compatible binaries.
|
163 |
-
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
|
164 |
-
you can try compiling with MingW32, by passing "-c mingw32" to setup.py."""
|
165 |
-
)
|
166 |
-
|
167 |
-
if version >= 9.0:
|
168 |
-
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
|
169 |
-
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
|
170 |
-
else:
|
171 |
-
p = r"Software\Microsoft\NET Framework Setup\Product"
|
172 |
-
for base in HKEYS:
|
173 |
-
try:
|
174 |
-
h = RegOpenKeyEx(base, p)
|
175 |
-
except RegError:
|
176 |
-
continue
|
177 |
-
key = RegEnumKey(h, 0)
|
178 |
-
d = Reg.get_value(base, r"{}\{}".format(p, key))
|
179 |
-
self.macros["$(FrameworkVersion)"] = d["version"]
|
180 |
-
|
181 |
-
def sub(self, s):
|
182 |
-
for k, v in self.macros.items():
|
183 |
-
s = s.replace(k, v)
|
184 |
-
return s
|
185 |
-
|
186 |
-
|
187 |
-
def get_build_version():
|
188 |
-
"""Return the version of MSVC that was used to build Python.
|
189 |
-
|
190 |
-
For Python 2.3 and up, the version number is included in
|
191 |
-
sys.version. For earlier versions, assume the compiler is MSVC 6.
|
192 |
-
"""
|
193 |
-
prefix = "MSC v."
|
194 |
-
i = sys.version.find(prefix)
|
195 |
-
if i == -1:
|
196 |
-
return 6
|
197 |
-
i = i + len(prefix)
|
198 |
-
s, rest = sys.version[i:].split(" ", 1)
|
199 |
-
majorVersion = int(s[:-2]) - 6
|
200 |
-
if majorVersion >= 13:
|
201 |
-
# v13 was skipped and should be v14
|
202 |
-
majorVersion += 1
|
203 |
-
minorVersion = int(s[2:3]) / 10.0
|
204 |
-
# I don't think paths are affected by minor version in version 6
|
205 |
-
if majorVersion == 6:
|
206 |
-
minorVersion = 0
|
207 |
-
if majorVersion >= 6:
|
208 |
-
return majorVersion + minorVersion
|
209 |
-
# else we don't know what version of the compiler this is
|
210 |
-
return None
|
211 |
-
|
212 |
-
|
213 |
-
def normalize_and_reduce_paths(paths):
|
214 |
-
"""Return a list of normalized paths with duplicates removed.
|
215 |
-
|
216 |
-
The current order of paths is maintained.
|
217 |
-
"""
|
218 |
-
# Paths are normalized so things like: /a and /a/ aren't both preserved.
|
219 |
-
reduced_paths = []
|
220 |
-
for p in paths:
|
221 |
-
np = os.path.normpath(p)
|
222 |
-
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
|
223 |
-
if np not in reduced_paths:
|
224 |
-
reduced_paths.append(np)
|
225 |
-
return reduced_paths
|
226 |
-
|
227 |
-
|
228 |
-
def removeDuplicates(variable):
|
229 |
-
"""Remove duplicate values of an environment variable."""
|
230 |
-
oldList = variable.split(os.pathsep)
|
231 |
-
newList = []
|
232 |
-
for i in oldList:
|
233 |
-
if i not in newList:
|
234 |
-
newList.append(i)
|
235 |
-
newVariable = os.pathsep.join(newList)
|
236 |
-
return newVariable
|
237 |
-
|
238 |
-
|
239 |
-
def find_vcvarsall(version):
|
240 |
-
"""Find the vcvarsall.bat file
|
241 |
-
|
242 |
-
At first it tries to find the productdir of VS 2008 in the registry. If
|
243 |
-
that fails it falls back to the VS90COMNTOOLS env var.
|
244 |
-
"""
|
245 |
-
vsbase = VS_BASE % version
|
246 |
-
try:
|
247 |
-
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase, "productdir")
|
248 |
-
except KeyError:
|
249 |
-
log.debug("Unable to find productdir in registry")
|
250 |
-
productdir = None
|
251 |
-
|
252 |
-
if not productdir or not os.path.isdir(productdir):
|
253 |
-
toolskey = "VS%0.f0COMNTOOLS" % version
|
254 |
-
toolsdir = os.environ.get(toolskey, None)
|
255 |
-
|
256 |
-
if toolsdir and os.path.isdir(toolsdir):
|
257 |
-
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
|
258 |
-
productdir = os.path.abspath(productdir)
|
259 |
-
if not os.path.isdir(productdir):
|
260 |
-
log.debug("%s is not a valid directory" % productdir)
|
261 |
-
return None
|
262 |
-
else:
|
263 |
-
log.debug("Env var %s is not set or invalid" % toolskey)
|
264 |
-
if not productdir:
|
265 |
-
log.debug("No productdir found")
|
266 |
-
return None
|
267 |
-
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
|
268 |
-
if os.path.isfile(vcvarsall):
|
269 |
-
return vcvarsall
|
270 |
-
log.debug("Unable to find vcvarsall.bat")
|
271 |
-
return None
|
272 |
-
|
273 |
-
|
274 |
-
def query_vcvarsall(version, arch="x86"):
|
275 |
-
"""Launch vcvarsall.bat and read the settings from its environment"""
|
276 |
-
vcvarsall = find_vcvarsall(version)
|
277 |
-
interesting = {"include", "lib", "libpath", "path"}
|
278 |
-
result = {}
|
279 |
-
|
280 |
-
if vcvarsall is None:
|
281 |
-
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
|
282 |
-
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
|
283 |
-
popen = subprocess.Popen(
|
284 |
-
'"{}" {} & set'.format(vcvarsall, arch),
|
285 |
-
stdout=subprocess.PIPE,
|
286 |
-
stderr=subprocess.PIPE,
|
287 |
-
)
|
288 |
-
try:
|
289 |
-
stdout, stderr = popen.communicate()
|
290 |
-
if popen.wait() != 0:
|
291 |
-
raise DistutilsPlatformError(stderr.decode("mbcs"))
|
292 |
-
|
293 |
-
stdout = stdout.decode("mbcs")
|
294 |
-
for line in stdout.split("\n"):
|
295 |
-
line = Reg.convert_mbcs(line)
|
296 |
-
if '=' not in line:
|
297 |
-
continue
|
298 |
-
line = line.strip()
|
299 |
-
key, value = line.split('=', 1)
|
300 |
-
key = key.lower()
|
301 |
-
if key in interesting:
|
302 |
-
if value.endswith(os.pathsep):
|
303 |
-
value = value[:-1]
|
304 |
-
result[key] = removeDuplicates(value)
|
305 |
-
|
306 |
-
finally:
|
307 |
-
popen.stdout.close()
|
308 |
-
popen.stderr.close()
|
309 |
-
|
310 |
-
if len(result) != len(interesting):
|
311 |
-
raise ValueError(str(list(result.keys())))
|
312 |
-
|
313 |
-
return result
|
314 |
-
|
315 |
-
|
316 |
-
# More globals
|
317 |
-
VERSION = get_build_version()
|
318 |
-
# MACROS = MacroExpander(VERSION)
|
319 |
-
|
320 |
-
|
321 |
-
class MSVCCompiler(CCompiler):
|
322 |
-
"""Concrete class that implements an interface to Microsoft Visual C++,
|
323 |
-
as defined by the CCompiler abstract class."""
|
324 |
-
|
325 |
-
compiler_type = 'msvc'
|
326 |
-
|
327 |
-
# Just set this so CCompiler's constructor doesn't barf. We currently
|
328 |
-
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
|
329 |
-
# as it really isn't necessary for this sort of single-compiler class.
|
330 |
-
# Would be nice to have a consistent interface with UnixCCompiler,
|
331 |
-
# though, so it's worth thinking about.
|
332 |
-
executables = {}
|
333 |
-
|
334 |
-
# Private class data (need to distinguish C from C++ source for compiler)
|
335 |
-
_c_extensions = ['.c']
|
336 |
-
_cpp_extensions = ['.cc', '.cpp', '.cxx']
|
337 |
-
_rc_extensions = ['.rc']
|
338 |
-
_mc_extensions = ['.mc']
|
339 |
-
|
340 |
-
# Needed for the filename generation methods provided by the
|
341 |
-
# base class, CCompiler.
|
342 |
-
src_extensions = _c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions
|
343 |
-
res_extension = '.res'
|
344 |
-
obj_extension = '.obj'
|
345 |
-
static_lib_extension = '.lib'
|
346 |
-
shared_lib_extension = '.dll'
|
347 |
-
static_lib_format = shared_lib_format = '%s%s'
|
348 |
-
exe_extension = '.exe'
|
349 |
-
|
350 |
-
def __init__(self, verbose=0, dry_run=0, force=0):
|
351 |
-
super().__init__(verbose, dry_run, force)
|
352 |
-
self.__version = VERSION
|
353 |
-
self.__root = r"Software\Microsoft\VisualStudio"
|
354 |
-
# self.__macros = MACROS
|
355 |
-
self.__paths = []
|
356 |
-
# target platform (.plat_name is consistent with 'bdist')
|
357 |
-
self.plat_name = None
|
358 |
-
self.__arch = None # deprecated name
|
359 |
-
self.initialized = False
|
360 |
-
|
361 |
-
def initialize(self, plat_name=None): # noqa: C901
|
362 |
-
# multi-init means we would need to check platform same each time...
|
363 |
-
assert not self.initialized, "don't init multiple times"
|
364 |
-
if self.__version < 8.0:
|
365 |
-
raise DistutilsPlatformError(
|
366 |
-
"VC %0.1f is not supported by this module" % self.__version
|
367 |
-
)
|
368 |
-
if plat_name is None:
|
369 |
-
plat_name = get_platform()
|
370 |
-
# sanity check for platforms to prevent obscure errors later.
|
371 |
-
ok_plats = 'win32', 'win-amd64'
|
372 |
-
if plat_name not in ok_plats:
|
373 |
-
raise DistutilsPlatformError(
|
374 |
-
"--plat-name must be one of {}".format(ok_plats)
|
375 |
-
)
|
376 |
-
|
377 |
-
if (
|
378 |
-
"DISTUTILS_USE_SDK" in os.environ
|
379 |
-
and "MSSdk" in os.environ
|
380 |
-
and self.find_exe("cl.exe")
|
381 |
-
):
|
382 |
-
# Assume that the SDK set up everything alright; don't try to be
|
383 |
-
# smarter
|
384 |
-
self.cc = "cl.exe"
|
385 |
-
self.linker = "link.exe"
|
386 |
-
self.lib = "lib.exe"
|
387 |
-
self.rc = "rc.exe"
|
388 |
-
self.mc = "mc.exe"
|
389 |
-
else:
|
390 |
-
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
|
391 |
-
# to cross compile, you use 'x86_amd64'.
|
392 |
-
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
|
393 |
-
# compile use 'x86' (ie, it runs the x86 compiler directly)
|
394 |
-
if plat_name == get_platform() or plat_name == 'win32':
|
395 |
-
# native build or cross-compile to win32
|
396 |
-
plat_spec = PLAT_TO_VCVARS[plat_name]
|
397 |
-
else:
|
398 |
-
# cross compile from win32 -> some 64bit
|
399 |
-
plat_spec = (
|
400 |
-
PLAT_TO_VCVARS[get_platform()] + '_' + PLAT_TO_VCVARS[plat_name]
|
401 |
-
)
|
402 |
-
|
403 |
-
vc_env = query_vcvarsall(VERSION, plat_spec)
|
404 |
-
|
405 |
-
self.__paths = vc_env['path'].split(os.pathsep)
|
406 |
-
os.environ['lib'] = vc_env['lib']
|
407 |
-
os.environ['include'] = vc_env['include']
|
408 |
-
|
409 |
-
if len(self.__paths) == 0:
|
410 |
-
raise DistutilsPlatformError(
|
411 |
-
"Python was built with %s, "
|
412 |
-
"and extensions need to be built with the same "
|
413 |
-
"version of the compiler, but it isn't installed." % self.__product
|
414 |
-
)
|
415 |
-
|
416 |
-
self.cc = self.find_exe("cl.exe")
|
417 |
-
self.linker = self.find_exe("link.exe")
|
418 |
-
self.lib = self.find_exe("lib.exe")
|
419 |
-
self.rc = self.find_exe("rc.exe") # resource compiler
|
420 |
-
self.mc = self.find_exe("mc.exe") # message compiler
|
421 |
-
# self.set_path_env_var('lib')
|
422 |
-
# self.set_path_env_var('include')
|
423 |
-
|
424 |
-
# extend the MSVC path with the current path
|
425 |
-
try:
|
426 |
-
for p in os.environ['path'].split(';'):
|
427 |
-
self.__paths.append(p)
|
428 |
-
except KeyError:
|
429 |
-
pass
|
430 |
-
self.__paths = normalize_and_reduce_paths(self.__paths)
|
431 |
-
os.environ['path'] = ";".join(self.__paths)
|
432 |
-
|
433 |
-
self.preprocess_options = None
|
434 |
-
if self.__arch == "x86":
|
435 |
-
self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/DNDEBUG']
|
436 |
-
self.compile_options_debug = [
|
437 |
-
'/nologo',
|
438 |
-
'/Od',
|
439 |
-
'/MDd',
|
440 |
-
'/W3',
|
441 |
-
'/Z7',
|
442 |
-
'/D_DEBUG',
|
443 |
-
]
|
444 |
-
else:
|
445 |
-
# Win64
|
446 |
-
self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/GS-', '/DNDEBUG']
|
447 |
-
self.compile_options_debug = [
|
448 |
-
'/nologo',
|
449 |
-
'/Od',
|
450 |
-
'/MDd',
|
451 |
-
'/W3',
|
452 |
-
'/GS-',
|
453 |
-
'/Z7',
|
454 |
-
'/D_DEBUG',
|
455 |
-
]
|
456 |
-
|
457 |
-
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
|
458 |
-
if self.__version >= 7:
|
459 |
-
self.ldflags_shared_debug = ['/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG']
|
460 |
-
self.ldflags_static = ['/nologo']
|
461 |
-
|
462 |
-
self.initialized = True
|
463 |
-
|
464 |
-
# -- Worker methods ------------------------------------------------
|
465 |
-
|
466 |
-
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
|
467 |
-
# Copied from ccompiler.py, extended to return .res as 'object'-file
|
468 |
-
# for .rc input file
|
469 |
-
if output_dir is None:
|
470 |
-
output_dir = ''
|
471 |
-
obj_names = []
|
472 |
-
for src_name in source_filenames:
|
473 |
-
(base, ext) = os.path.splitext(src_name)
|
474 |
-
base = os.path.splitdrive(base)[1] # Chop off the drive
|
475 |
-
base = base[os.path.isabs(base) :] # If abs, chop off leading /
|
476 |
-
if ext not in self.src_extensions:
|
477 |
-
# Better to raise an exception instead of silently continuing
|
478 |
-
# and later complain about sources and targets having
|
479 |
-
# different lengths
|
480 |
-
raise CompileError("Don't know how to compile %s" % src_name)
|
481 |
-
if strip_dir:
|
482 |
-
base = os.path.basename(base)
|
483 |
-
if ext in self._rc_extensions:
|
484 |
-
obj_names.append(os.path.join(output_dir, base + self.res_extension))
|
485 |
-
elif ext in self._mc_extensions:
|
486 |
-
obj_names.append(os.path.join(output_dir, base + self.res_extension))
|
487 |
-
else:
|
488 |
-
obj_names.append(os.path.join(output_dir, base + self.obj_extension))
|
489 |
-
return obj_names
|
490 |
-
|
491 |
-
def compile( # noqa: C901
|
492 |
-
self,
|
493 |
-
sources,
|
494 |
-
output_dir=None,
|
495 |
-
macros=None,
|
496 |
-
include_dirs=None,
|
497 |
-
debug=0,
|
498 |
-
extra_preargs=None,
|
499 |
-
extra_postargs=None,
|
500 |
-
depends=None,
|
501 |
-
):
|
502 |
-
|
503 |
-
if not self.initialized:
|
504 |
-
self.initialize()
|
505 |
-
compile_info = self._setup_compile(
|
506 |
-
output_dir, macros, include_dirs, sources, depends, extra_postargs
|
507 |
-
)
|
508 |
-
macros, objects, extra_postargs, pp_opts, build = compile_info
|
509 |
-
|
510 |
-
compile_opts = extra_preargs or []
|
511 |
-
compile_opts.append('/c')
|
512 |
-
if debug:
|
513 |
-
compile_opts.extend(self.compile_options_debug)
|
514 |
-
else:
|
515 |
-
compile_opts.extend(self.compile_options)
|
516 |
-
|
517 |
-
for obj in objects:
|
518 |
-
try:
|
519 |
-
src, ext = build[obj]
|
520 |
-
except KeyError:
|
521 |
-
continue
|
522 |
-
if debug:
|
523 |
-
# pass the full pathname to MSVC in debug mode,
|
524 |
-
# this allows the debugger to find the source file
|
525 |
-
# without asking the user to browse for it
|
526 |
-
src = os.path.abspath(src)
|
527 |
-
|
528 |
-
if ext in self._c_extensions:
|
529 |
-
input_opt = "/Tc" + src
|
530 |
-
elif ext in self._cpp_extensions:
|
531 |
-
input_opt = "/Tp" + src
|
532 |
-
elif ext in self._rc_extensions:
|
533 |
-
# compile .RC to .RES file
|
534 |
-
input_opt = src
|
535 |
-
output_opt = "/fo" + obj
|
536 |
-
try:
|
537 |
-
self.spawn([self.rc] + pp_opts + [output_opt] + [input_opt])
|
538 |
-
except DistutilsExecError as msg:
|
539 |
-
raise CompileError(msg)
|
540 |
-
continue
|
541 |
-
elif ext in self._mc_extensions:
|
542 |
-
# Compile .MC to .RC file to .RES file.
|
543 |
-
# * '-h dir' specifies the directory for the
|
544 |
-
# generated include file
|
545 |
-
# * '-r dir' specifies the target directory of the
|
546 |
-
# generated RC file and the binary message resource
|
547 |
-
# it includes
|
548 |
-
#
|
549 |
-
# For now (since there are no options to change this),
|
550 |
-
# we use the source-directory for the include file and
|
551 |
-
# the build directory for the RC file and message
|
552 |
-
# resources. This works at least for win32all.
|
553 |
-
h_dir = os.path.dirname(src)
|
554 |
-
rc_dir = os.path.dirname(obj)
|
555 |
-
try:
|
556 |
-
# first compile .MC to .RC and .H file
|
557 |
-
self.spawn([self.mc] + ['-h', h_dir, '-r', rc_dir] + [src])
|
558 |
-
base, _ = os.path.splitext(os.path.basename(src))
|
559 |
-
rc_file = os.path.join(rc_dir, base + '.rc')
|
560 |
-
# then compile .RC to .RES file
|
561 |
-
self.spawn([self.rc] + ["/fo" + obj] + [rc_file])
|
562 |
-
|
563 |
-
except DistutilsExecError as msg:
|
564 |
-
raise CompileError(msg)
|
565 |
-
continue
|
566 |
-
else:
|
567 |
-
# how to handle this file?
|
568 |
-
raise CompileError(
|
569 |
-
"Don't know how to compile {} to {}".format(src, obj)
|
570 |
-
)
|
571 |
-
|
572 |
-
output_opt = "/Fo" + obj
|
573 |
-
try:
|
574 |
-
self.spawn(
|
575 |
-
[self.cc]
|
576 |
-
+ compile_opts
|
577 |
-
+ pp_opts
|
578 |
-
+ [input_opt, output_opt]
|
579 |
-
+ extra_postargs
|
580 |
-
)
|
581 |
-
except DistutilsExecError as msg:
|
582 |
-
raise CompileError(msg)
|
583 |
-
|
584 |
-
return objects
|
585 |
-
|
586 |
-
def create_static_lib(
|
587 |
-
self, objects, output_libname, output_dir=None, debug=0, target_lang=None
|
588 |
-
):
|
589 |
-
|
590 |
-
if not self.initialized:
|
591 |
-
self.initialize()
|
592 |
-
(objects, output_dir) = self._fix_object_args(objects, output_dir)
|
593 |
-
output_filename = self.library_filename(output_libname, output_dir=output_dir)
|
594 |
-
|
595 |
-
if self._need_link(objects, output_filename):
|
596 |
-
lib_args = objects + ['/OUT:' + output_filename]
|
597 |
-
if debug:
|
598 |
-
pass # XXX what goes here?
|
599 |
-
try:
|
600 |
-
self.spawn([self.lib] + lib_args)
|
601 |
-
except DistutilsExecError as msg:
|
602 |
-
raise LibError(msg)
|
603 |
-
else:
|
604 |
-
log.debug("skipping %s (up-to-date)", output_filename)
|
605 |
-
|
606 |
-
def link( # noqa: C901
|
607 |
-
self,
|
608 |
-
target_desc,
|
609 |
-
objects,
|
610 |
-
output_filename,
|
611 |
-
output_dir=None,
|
612 |
-
libraries=None,
|
613 |
-
library_dirs=None,
|
614 |
-
runtime_library_dirs=None,
|
615 |
-
export_symbols=None,
|
616 |
-
debug=0,
|
617 |
-
extra_preargs=None,
|
618 |
-
extra_postargs=None,
|
619 |
-
build_temp=None,
|
620 |
-
target_lang=None,
|
621 |
-
):
|
622 |
-
|
623 |
-
if not self.initialized:
|
624 |
-
self.initialize()
|
625 |
-
(objects, output_dir) = self._fix_object_args(objects, output_dir)
|
626 |
-
fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
|
627 |
-
(libraries, library_dirs, runtime_library_dirs) = fixed_args
|
628 |
-
|
629 |
-
if runtime_library_dirs:
|
630 |
-
self.warn(
|
631 |
-
"I don't know what to do with 'runtime_library_dirs': "
|
632 |
-
+ str(runtime_library_dirs)
|
633 |
-
)
|
634 |
-
|
635 |
-
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries)
|
636 |
-
if output_dir is not None:
|
637 |
-
output_filename = os.path.join(output_dir, output_filename)
|
638 |
-
|
639 |
-
if self._need_link(objects, output_filename):
|
640 |
-
if target_desc == CCompiler.EXECUTABLE:
|
641 |
-
if debug:
|
642 |
-
ldflags = self.ldflags_shared_debug[1:]
|
643 |
-
else:
|
644 |
-
ldflags = self.ldflags_shared[1:]
|
645 |
-
else:
|
646 |
-
if debug:
|
647 |
-
ldflags = self.ldflags_shared_debug
|
648 |
-
else:
|
649 |
-
ldflags = self.ldflags_shared
|
650 |
-
|
651 |
-
export_opts = []
|
652 |
-
for sym in export_symbols or []:
|
653 |
-
export_opts.append("/EXPORT:" + sym)
|
654 |
-
|
655 |
-
ld_args = (
|
656 |
-
ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename]
|
657 |
-
)
|
658 |
-
|
659 |
-
# The MSVC linker generates .lib and .exp files, which cannot be
|
660 |
-
# suppressed by any linker switches. The .lib files may even be
|
661 |
-
# needed! Make sure they are generated in the temporary build
|
662 |
-
# directory. Since they have different names for debug and release
|
663 |
-
# builds, they can go into the same directory.
|
664 |
-
build_temp = os.path.dirname(objects[0])
|
665 |
-
if export_symbols is not None:
|
666 |
-
(dll_name, dll_ext) = os.path.splitext(
|
667 |
-
os.path.basename(output_filename)
|
668 |
-
)
|
669 |
-
implib_file = os.path.join(build_temp, self.library_filename(dll_name))
|
670 |
-
ld_args.append('/IMPLIB:' + implib_file)
|
671 |
-
|
672 |
-
self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
|
673 |
-
|
674 |
-
if extra_preargs:
|
675 |
-
ld_args[:0] = extra_preargs
|
676 |
-
if extra_postargs:
|
677 |
-
ld_args.extend(extra_postargs)
|
678 |
-
|
679 |
-
self.mkpath(os.path.dirname(output_filename))
|
680 |
-
try:
|
681 |
-
self.spawn([self.linker] + ld_args)
|
682 |
-
except DistutilsExecError as msg:
|
683 |
-
raise LinkError(msg)
|
684 |
-
|
685 |
-
# embed the manifest
|
686 |
-
# XXX - this is somewhat fragile - if mt.exe fails, distutils
|
687 |
-
# will still consider the DLL up-to-date, but it will not have a
|
688 |
-
# manifest. Maybe we should link to a temp file? OTOH, that
|
689 |
-
# implies a build environment error that shouldn't go undetected.
|
690 |
-
mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
|
691 |
-
if mfinfo is not None:
|
692 |
-
mffilename, mfid = mfinfo
|
693 |
-
out_arg = '-outputresource:{};{}'.format(output_filename, mfid)
|
694 |
-
try:
|
695 |
-
self.spawn(['mt.exe', '-nologo', '-manifest', mffilename, out_arg])
|
696 |
-
except DistutilsExecError as msg:
|
697 |
-
raise LinkError(msg)
|
698 |
-
else:
|
699 |
-
log.debug("skipping %s (up-to-date)", output_filename)
|
700 |
-
|
701 |
-
def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
|
702 |
-
# If we need a manifest at all, an embedded manifest is recommended.
|
703 |
-
# See MSDN article titled
|
704 |
-
# "How to: Embed a Manifest Inside a C/C++ Application"
|
705 |
-
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
|
706 |
-
# Ask the linker to generate the manifest in the temp dir, so
|
707 |
-
# we can check it, and possibly embed it, later.
|
708 |
-
temp_manifest = os.path.join(
|
709 |
-
build_temp, os.path.basename(output_filename) + ".manifest"
|
710 |
-
)
|
711 |
-
ld_args.append('/MANIFESTFILE:' + temp_manifest)
|
712 |
-
|
713 |
-
def manifest_get_embed_info(self, target_desc, ld_args):
|
714 |
-
# If a manifest should be embedded, return a tuple of
|
715 |
-
# (manifest_filename, resource_id). Returns None if no manifest
|
716 |
-
# should be embedded. See http://bugs.python.org/issue7833 for why
|
717 |
-
# we want to avoid any manifest for extension modules if we can)
|
718 |
-
for arg in ld_args:
|
719 |
-
if arg.startswith("/MANIFESTFILE:"):
|
720 |
-
temp_manifest = arg.split(":", 1)[1]
|
721 |
-
break
|
722 |
-
else:
|
723 |
-
# no /MANIFESTFILE so nothing to do.
|
724 |
-
return None
|
725 |
-
if target_desc == CCompiler.EXECUTABLE:
|
726 |
-
# by default, executables always get the manifest with the
|
727 |
-
# CRT referenced.
|
728 |
-
mfid = 1
|
729 |
-
else:
|
730 |
-
# Extension modules try and avoid any manifest if possible.
|
731 |
-
mfid = 2
|
732 |
-
temp_manifest = self._remove_visual_c_ref(temp_manifest)
|
733 |
-
if temp_manifest is None:
|
734 |
-
return None
|
735 |
-
return temp_manifest, mfid
|
736 |
-
|
737 |
-
def _remove_visual_c_ref(self, manifest_file):
|
738 |
-
try:
|
739 |
-
# Remove references to the Visual C runtime, so they will
|
740 |
-
# fall through to the Visual C dependency of Python.exe.
|
741 |
-
# This way, when installed for a restricted user (e.g.
|
742 |
-
# runtimes are not in WinSxS folder, but in Python's own
|
743 |
-
# folder), the runtimes do not need to be in every folder
|
744 |
-
# with .pyd's.
|
745 |
-
# Returns either the filename of the modified manifest or
|
746 |
-
# None if no manifest should be embedded.
|
747 |
-
manifest_f = open(manifest_file)
|
748 |
-
try:
|
749 |
-
manifest_buf = manifest_f.read()
|
750 |
-
finally:
|
751 |
-
manifest_f.close()
|
752 |
-
pattern = re.compile(
|
753 |
-
r"""<assemblyIdentity.*?name=("|')Microsoft\."""
|
754 |
-
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
|
755 |
-
re.DOTALL,
|
756 |
-
)
|
757 |
-
manifest_buf = re.sub(pattern, "", manifest_buf)
|
758 |
-
pattern = r"<dependentAssembly>\s*</dependentAssembly>"
|
759 |
-
manifest_buf = re.sub(pattern, "", manifest_buf)
|
760 |
-
# Now see if any other assemblies are referenced - if not, we
|
761 |
-
# don't want a manifest embedded.
|
762 |
-
pattern = re.compile(
|
763 |
-
r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
|
764 |
-
r""".*?(?:/>|</assemblyIdentity>)""",
|
765 |
-
re.DOTALL,
|
766 |
-
)
|
767 |
-
if re.search(pattern, manifest_buf) is None:
|
768 |
-
return None
|
769 |
-
|
770 |
-
manifest_f = open(manifest_file, 'w')
|
771 |
-
try:
|
772 |
-
manifest_f.write(manifest_buf)
|
773 |
-
return manifest_file
|
774 |
-
finally:
|
775 |
-
manifest_f.close()
|
776 |
-
except OSError:
|
777 |
-
pass
|
778 |
-
|
779 |
-
# -- Miscellaneous methods -----------------------------------------
|
780 |
-
# These are all used by the 'gen_lib_options() function, in
|
781 |
-
# ccompiler.py.
|
782 |
-
|
783 |
-
def library_dir_option(self, dir):
|
784 |
-
return "/LIBPATH:" + dir
|
785 |
-
|
786 |
-
def runtime_library_dir_option(self, dir):
|
787 |
-
raise DistutilsPlatformError(
|
788 |
-
"don't know how to set runtime library search path for MSVC++"
|
789 |
-
)
|
790 |
-
|
791 |
-
def library_option(self, lib):
|
792 |
-
return self.library_filename(lib)
|
793 |
-
|
794 |
-
def find_library_file(self, dirs, lib, debug=0):
|
795 |
-
# Prefer a debugging library if found (and requested), but deal
|
796 |
-
# with it if we don't have one.
|
797 |
-
if debug:
|
798 |
-
try_names = [lib + "_d", lib]
|
799 |
-
else:
|
800 |
-
try_names = [lib]
|
801 |
-
for dir in dirs:
|
802 |
-
for name in try_names:
|
803 |
-
libfile = os.path.join(dir, self.library_filename(name))
|
804 |
-
if os.path.exists(libfile):
|
805 |
-
return libfile
|
806 |
-
else:
|
807 |
-
# Oops, didn't find it in *any* of 'dirs'
|
808 |
-
return None
|
809 |
-
|
810 |
-
# Helper methods for using the MSVC registry settings
|
811 |
-
|
812 |
-
def find_exe(self, exe):
|
813 |
-
"""Return path to an MSVC executable program.
|
814 |
-
|
815 |
-
Tries to find the program in several places: first, one of the
|
816 |
-
MSVC program search paths from the registry; next, the directories
|
817 |
-
in the PATH environment variable. If any of those work, return an
|
818 |
-
absolute path that is known to exist. If none of them work, just
|
819 |
-
return the original program name, 'exe'.
|
820 |
-
"""
|
821 |
-
for p in self.__paths:
|
822 |
-
fn = os.path.join(os.path.abspath(p), exe)
|
823 |
-
if os.path.isfile(fn):
|
824 |
-
return fn
|
825 |
-
|
826 |
-
# didn't find it; try existing path
|
827 |
-
for p in os.environ['Path'].split(';'):
|
828 |
-
fn = os.path.join(os.path.abspath(p), exe)
|
829 |
-
if os.path.isfile(fn):
|
830 |
-
return fn
|
831 |
-
|
832 |
-
return exe
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_collections.py
DELETED
@@ -1,30 +0,0 @@
|
|
1 |
-
import collections
|
2 |
-
|
3 |
-
|
4 |
-
# from jaraco.collections 3.3
|
5 |
-
class FreezableDefaultDict(collections.defaultdict):
|
6 |
-
"""
|
7 |
-
Often it is desirable to prevent the mutation of
|
8 |
-
a default dict after its initial construction, such
|
9 |
-
as to prevent mutation during iteration.
|
10 |
-
|
11 |
-
>>> dd = FreezableDefaultDict(list)
|
12 |
-
>>> dd[0].append('1')
|
13 |
-
>>> dd.freeze()
|
14 |
-
>>> dd[1]
|
15 |
-
[]
|
16 |
-
>>> len(dd)
|
17 |
-
1
|
18 |
-
"""
|
19 |
-
|
20 |
-
def __missing__(self, key):
|
21 |
-
return getattr(self, '_frozen', super().__missing__)(key)
|
22 |
-
|
23 |
-
def freeze(self):
|
24 |
-
self._frozen = lambda key: self.default_factory()
|
25 |
-
|
26 |
-
|
27 |
-
class Pair(collections.namedtuple('Pair', 'name value')):
|
28 |
-
@classmethod
|
29 |
-
def parse(cls, text):
|
30 |
-
return cls(*map(str.strip, text.split("=", 1)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/version.py
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
import pkg_resources
|
2 |
-
|
3 |
-
try:
|
4 |
-
__version__ = pkg_resources.get_distribution('setuptools').version
|
5 |
-
except Exception:
|
6 |
-
__version__ = 'unknown'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Audio-AGI/WavJourney/utils.py
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import re
|
3 |
-
import torch
|
4 |
-
import numpy as np
|
5 |
-
import yaml
|
6 |
-
from pathlib import Path
|
7 |
-
|
8 |
-
|
9 |
-
#### path related code BEGIN ####
|
10 |
-
def get_session_path(session_id):
|
11 |
-
return Path(f'output/sessions/{session_id}')
|
12 |
-
|
13 |
-
def get_system_voice_preset_path():
|
14 |
-
return Path('data/voice_presets')
|
15 |
-
|
16 |
-
def get_session_voice_preset_path(session_id):
|
17 |
-
return Path(f'{get_session_path(session_id)}/voice_presets')
|
18 |
-
|
19 |
-
def get_session_audio_path(session_id):
|
20 |
-
return Path(f'{get_session_path(session_id)}/audio')
|
21 |
-
|
22 |
-
def rescale_to_match_energy(segment1, segment2):
|
23 |
-
ratio = get_energy_ratio(segment1, segment2)
|
24 |
-
recaled_segment1 = segment1 / ratio
|
25 |
-
return recaled_segment1.numpy()
|
26 |
-
#### path related code END ####
|
27 |
-
|
28 |
-
def text_to_abbrev_prompt(input_text):
|
29 |
-
return re.sub(r'[^a-zA-Z_]', '', '_'.join(input_text.split()[:5]))
|
30 |
-
|
31 |
-
def get_energy(x):
|
32 |
-
return np.mean(x ** 2)
|
33 |
-
|
34 |
-
|
35 |
-
def get_energy_ratio(segment1, segment2):
|
36 |
-
energy1 = get_energy(segment1)
|
37 |
-
energy2 = max(get_energy(segment2), 1e-10)
|
38 |
-
ratio = (energy1 / energy2) ** 0.5
|
39 |
-
ratio = torch.tensor(ratio)
|
40 |
-
ratio = torch.clamp(ratio, 0.02, 50)
|
41 |
-
return ratio
|
42 |
-
|
43 |
-
def fade(audio_data, fade_duration=2, sr=32000):
|
44 |
-
audio_duration = audio_data.shape[0] / sr
|
45 |
-
|
46 |
-
# automated choose fade duration
|
47 |
-
if audio_duration >=8:
|
48 |
-
# keep fade_duration 2
|
49 |
-
pass
|
50 |
-
else:
|
51 |
-
fade_duration = audio_duration / 5
|
52 |
-
|
53 |
-
fade_sampels = int(sr * fade_duration)
|
54 |
-
fade_in = np.linspace(0, 1, fade_sampels)
|
55 |
-
fade_out = np.linspace(1, 0, fade_sampels)
|
56 |
-
|
57 |
-
audio_data_fade_in = audio_data[:fade_sampels] * fade_in
|
58 |
-
audio_data_fade_out = audio_data[-fade_sampels:] * fade_out
|
59 |
-
|
60 |
-
audio_data_faded = np.concatenate((audio_data_fade_in, audio_data[len(fade_in):-len(fade_out)], audio_data_fade_out))
|
61 |
-
return audio_data_faded
|
62 |
-
|
63 |
-
# def get_key(config='config.yaml'):
|
64 |
-
# with open('config.yaml', 'r') as file:
|
65 |
-
# config = yaml.safe_load(file)
|
66 |
-
# return config['OpenAI-Key'] if 'OpenAI-Key' in config else None
|
67 |
-
|
68 |
-
def get_service_port():
|
69 |
-
service_port = os.environ.get('WAVJOURNEY_SERVICE_PORT')
|
70 |
-
return service_port
|
71 |
-
|
72 |
-
def get_service_url():
|
73 |
-
service_url = os.environ.get('WAVJOURNEY_SERVICE_URL')
|
74 |
-
return service_url
|
75 |
-
|
76 |
-
def get_api_key():
|
77 |
-
api_key = os.environ.get('WAVJOURNEY_OPENAI_KEY')
|
78 |
-
return api_key
|
79 |
-
|
80 |
-
def get_max_script_lines():
|
81 |
-
max_lines = int(os.environ.get('WAVJOURNEY_MAX_SCRIPT_LINES', 999))
|
82 |
-
return max_lines
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/mdx.py
DELETED
@@ -1,228 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import onnxruntime as ort
|
3 |
-
from tqdm import tqdm
|
4 |
-
import warnings
|
5 |
-
import numpy as np
|
6 |
-
import hashlib
|
7 |
-
import queue
|
8 |
-
import threading
|
9 |
-
|
10 |
-
warnings.filterwarnings("ignore")
|
11 |
-
|
12 |
-
class MDX_Model:
|
13 |
-
def __init__(self, device, dim_f, dim_t, n_fft, hop=1024, stem_name=None, compensation=1.000):
|
14 |
-
self.dim_f = dim_f
|
15 |
-
self.dim_t = dim_t
|
16 |
-
self.dim_c = 4
|
17 |
-
self.n_fft = n_fft
|
18 |
-
self.hop = hop
|
19 |
-
self.stem_name = stem_name
|
20 |
-
self.compensation = compensation
|
21 |
-
|
22 |
-
self.n_bins = self.n_fft//2+1
|
23 |
-
self.chunk_size = hop * (self.dim_t-1)
|
24 |
-
self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(device)
|
25 |
-
|
26 |
-
out_c = self.dim_c
|
27 |
-
|
28 |
-
self.freq_pad = torch.zeros([1, out_c, self.n_bins-self.dim_f, self.dim_t]).to(device)
|
29 |
-
|
30 |
-
def stft(self, x):
|
31 |
-
x = x.reshape([-1, self.chunk_size])
|
32 |
-
x = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True, return_complex=True)
|
33 |
-
x = torch.view_as_real(x)
|
34 |
-
x = x.permute([0,3,1,2])
|
35 |
-
x = x.reshape([-1,2,2,self.n_bins,self.dim_t]).reshape([-1,4,self.n_bins,self.dim_t])
|
36 |
-
return x[:,:,:self.dim_f]
|
37 |
-
|
38 |
-
def istft(self, x, freq_pad=None):
|
39 |
-
freq_pad = self.freq_pad.repeat([x.shape[0],1,1,1]) if freq_pad is None else freq_pad
|
40 |
-
x = torch.cat([x, freq_pad], -2)
|
41 |
-
# c = 4*2 if self.target_name=='*' else 2
|
42 |
-
x = x.reshape([-1,2,2,self.n_bins,self.dim_t]).reshape([-1,2,self.n_bins,self.dim_t])
|
43 |
-
x = x.permute([0,2,3,1])
|
44 |
-
x = x.contiguous()
|
45 |
-
x = torch.view_as_complex(x)
|
46 |
-
x = torch.istft(x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True)
|
47 |
-
return x.reshape([-1,2,self.chunk_size])
|
48 |
-
|
49 |
-
|
50 |
-
class MDX:
|
51 |
-
|
52 |
-
DEFAULT_SR = 44100
|
53 |
-
# Unit: seconds
|
54 |
-
DEFAULT_CHUNK_SIZE = 0 * DEFAULT_SR
|
55 |
-
DEFAULT_MARGIN_SIZE = 1 * DEFAULT_SR
|
56 |
-
|
57 |
-
DEFAULT_PROCESSOR = 0
|
58 |
-
|
59 |
-
def __init__(self, model_path:str, params:MDX_Model, processor=DEFAULT_PROCESSOR):
|
60 |
-
|
61 |
-
# Set the device and the provider (CPU or CUDA)
|
62 |
-
self.device = torch.device(f'cuda:{processor}') if processor >= 0 else torch.device('cpu')
|
63 |
-
self.provider = ['CUDAExecutionProvider'] if processor >= 0 else ['CPUExecutionProvider']
|
64 |
-
|
65 |
-
self.model = params
|
66 |
-
|
67 |
-
# Load the ONNX model using ONNX Runtime
|
68 |
-
self.ort = ort.InferenceSession(model_path, providers=self.provider)
|
69 |
-
# Preload the model for faster performance
|
70 |
-
self.ort.run(None, {'input':torch.rand(1, 4, params.dim_f, params.dim_t).numpy()})
|
71 |
-
self.process = lambda spec:self.ort.run(None, {'input': spec.cpu().numpy()})[0]
|
72 |
-
|
73 |
-
self.prog = None
|
74 |
-
|
75 |
-
@staticmethod
|
76 |
-
def get_hash(model_path):
|
77 |
-
try:
|
78 |
-
with open(model_path, 'rb') as f:
|
79 |
-
f.seek(- 10000 * 1024, 2)
|
80 |
-
model_hash = hashlib.md5(f.read()).hexdigest()
|
81 |
-
except:
|
82 |
-
model_hash = hashlib.md5(open(model_path,'rb').read()).hexdigest()
|
83 |
-
|
84 |
-
return model_hash
|
85 |
-
|
86 |
-
@staticmethod
|
87 |
-
def segment(wave, combine=True, chunk_size=DEFAULT_CHUNK_SIZE, margin_size=DEFAULT_MARGIN_SIZE):
|
88 |
-
"""
|
89 |
-
Segment or join segmented wave array
|
90 |
-
|
91 |
-
Args:
|
92 |
-
wave: (np.array) Wave array to be segmented or joined
|
93 |
-
combine: (bool) If True, combines segmented wave array. If False, segments wave array.
|
94 |
-
chunk_size: (int) Size of each segment (in samples)
|
95 |
-
margin_size: (int) Size of margin between segments (in samples)
|
96 |
-
|
97 |
-
Returns:
|
98 |
-
numpy array: Segmented or joined wave array
|
99 |
-
"""
|
100 |
-
|
101 |
-
if combine:
|
102 |
-
processed_wave = None # Initializing as None instead of [] for later numpy array concatenation
|
103 |
-
for segment_count, segment in enumerate(wave):
|
104 |
-
start = 0 if segment_count == 0 else margin_size
|
105 |
-
end = None if segment_count == len(wave)-1 else -margin_size
|
106 |
-
if margin_size == 0:
|
107 |
-
end = None
|
108 |
-
if processed_wave is None: # Create array for first segment
|
109 |
-
processed_wave = segment[:, start:end]
|
110 |
-
else: # Concatenate to existing array for subsequent segments
|
111 |
-
processed_wave = np.concatenate((processed_wave, segment[:, start:end]), axis=-1)
|
112 |
-
|
113 |
-
else:
|
114 |
-
processed_wave = []
|
115 |
-
sample_count = wave.shape[-1]
|
116 |
-
|
117 |
-
if chunk_size <= 0 or chunk_size > sample_count:
|
118 |
-
chunk_size = sample_count
|
119 |
-
|
120 |
-
if margin_size > chunk_size:
|
121 |
-
margin_size = chunk_size
|
122 |
-
|
123 |
-
for segment_count, skip in enumerate(range(0, sample_count, chunk_size)):
|
124 |
-
|
125 |
-
margin = 0 if segment_count == 0 else margin_size
|
126 |
-
end = min(skip+chunk_size+margin_size, sample_count)
|
127 |
-
start = skip-margin
|
128 |
-
|
129 |
-
cut = wave[:,start:end].copy()
|
130 |
-
processed_wave.append(cut)
|
131 |
-
|
132 |
-
if end == sample_count:
|
133 |
-
break
|
134 |
-
|
135 |
-
return processed_wave
|
136 |
-
|
137 |
-
def pad_wave(self, wave):
|
138 |
-
"""
|
139 |
-
Pad the wave array to match the required chunk size
|
140 |
-
|
141 |
-
Args:
|
142 |
-
wave: (np.array) Wave array to be padded
|
143 |
-
|
144 |
-
Returns:
|
145 |
-
tuple: (padded_wave, pad, trim)
|
146 |
-
- padded_wave: Padded wave array
|
147 |
-
- pad: Number of samples that were padded
|
148 |
-
- trim: Number of samples that were trimmed
|
149 |
-
"""
|
150 |
-
n_sample = wave.shape[1]
|
151 |
-
trim = self.model.n_fft//2
|
152 |
-
gen_size = self.model.chunk_size-2*trim
|
153 |
-
pad = gen_size - n_sample%gen_size
|
154 |
-
|
155 |
-
# Padded wave
|
156 |
-
wave_p = np.concatenate((np.zeros((2,trim)), wave, np.zeros((2,pad)), np.zeros((2,trim))), 1)
|
157 |
-
|
158 |
-
mix_waves = []
|
159 |
-
for i in range(0, n_sample+pad, gen_size):
|
160 |
-
waves = np.array(wave_p[:, i:i+self.model.chunk_size])
|
161 |
-
mix_waves.append(waves)
|
162 |
-
|
163 |
-
mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(self.device)
|
164 |
-
|
165 |
-
return mix_waves, pad, trim
|
166 |
-
|
167 |
-
def _process_wave(self, mix_waves, trim, pad, q:queue.Queue, _id:int):
|
168 |
-
"""
|
169 |
-
Process each wave segment in a multi-threaded environment
|
170 |
-
|
171 |
-
Args:
|
172 |
-
mix_waves: (torch.Tensor) Wave segments to be processed
|
173 |
-
trim: (int) Number of samples trimmed during padding
|
174 |
-
pad: (int) Number of samples padded during padding
|
175 |
-
q: (queue.Queue) Queue to hold the processed wave segments
|
176 |
-
_id: (int) Identifier of the processed wave segment
|
177 |
-
|
178 |
-
Returns:
|
179 |
-
numpy array: Processed wave segment
|
180 |
-
"""
|
181 |
-
mix_waves = mix_waves.split(1)
|
182 |
-
with torch.no_grad():
|
183 |
-
pw = []
|
184 |
-
for mix_wave in mix_waves:
|
185 |
-
self.prog.update()
|
186 |
-
spec = self.model.stft(mix_wave)
|
187 |
-
processed_spec = torch.tensor(self.process(spec))
|
188 |
-
processed_wav = self.model.istft(processed_spec.to(self.device))
|
189 |
-
processed_wav = processed_wav[:,:,trim:-trim].transpose(0,1).reshape(2, -1).cpu().numpy()
|
190 |
-
pw.append(processed_wav)
|
191 |
-
processed_signal = np.concatenate(pw, axis=-1)[:, :-pad]
|
192 |
-
q.put({_id:processed_signal})
|
193 |
-
return processed_signal
|
194 |
-
|
195 |
-
def process_wave(self, wave:np.array, mt_threads=1):
|
196 |
-
"""
|
197 |
-
Process the wave array in a multi-threaded environment
|
198 |
-
|
199 |
-
Args:
|
200 |
-
wave: (np.array) Wave array to be processed
|
201 |
-
mt_threads: (int) Number of threads to be used for processing
|
202 |
-
|
203 |
-
Returns:
|
204 |
-
numpy array: Processed wave array
|
205 |
-
"""
|
206 |
-
self.prog = tqdm(total=0)
|
207 |
-
chunk = wave.shape[-1]//mt_threads
|
208 |
-
waves = self.segment(wave, False, chunk)
|
209 |
-
|
210 |
-
# Create a queue to hold the processed wave segments
|
211 |
-
q = queue.Queue()
|
212 |
-
threads = []
|
213 |
-
for c, batch in enumerate(waves):
|
214 |
-
mix_waves, pad, trim = self.pad_wave(batch)
|
215 |
-
self.prog.total = len(mix_waves)*mt_threads
|
216 |
-
thread = threading.Thread(target=self._process_wave, args=(mix_waves, trim, pad, q, c))
|
217 |
-
thread.start()
|
218 |
-
threads.append(thread)
|
219 |
-
for thread in threads:
|
220 |
-
thread.join()
|
221 |
-
self.prog.close()
|
222 |
-
|
223 |
-
processed_batches = []
|
224 |
-
while not q.empty():
|
225 |
-
processed_batches.append(q.get())
|
226 |
-
processed_batches = [list(wave.values())[0] for wave in sorted(processed_batches, key=lambda d: list(d.keys())[0])]
|
227 |
-
assert len(processed_batches) == len(waves), 'Incomplete processed batches, please reduce batch size!'
|
228 |
-
return self.segment(processed_batches, True, chunk)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Beasto/Day_to_Night_Cyclegan/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Day To Night Cyclegan
|
3 |
-
emoji: 🏢
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: pink
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.27.2
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BetterAPI/BetterChat/src/lib/updateSettings.ts
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
import { invalidate } from "$app/navigation";
|
2 |
-
import { base } from "$app/paths";
|
3 |
-
import { error } from "$lib/stores/errors";
|
4 |
-
import type { Settings } from "./types/Settings";
|
5 |
-
import { UrlDependency } from "./types/UrlDependency";
|
6 |
-
|
7 |
-
export async function updateSettings(
|
8 |
-
settings: Partial<Omit<Settings, "sessionId">>
|
9 |
-
): Promise<boolean> {
|
10 |
-
try {
|
11 |
-
const res = await fetch(`${base}/settings`, {
|
12 |
-
method: "PATCH",
|
13 |
-
headers: { "Content-Type": "application/json" },
|
14 |
-
body: JSON.stringify(settings),
|
15 |
-
});
|
16 |
-
if (!res.ok) {
|
17 |
-
error.set("Error while updating settings, try again.");
|
18 |
-
return false;
|
19 |
-
}
|
20 |
-
await invalidate(UrlDependency.Settings);
|
21 |
-
return true;
|
22 |
-
} catch (err) {
|
23 |
-
console.error(err);
|
24 |
-
error.set(String(err));
|
25 |
-
return false;
|
26 |
-
}
|
27 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/temp_dir.py
DELETED
@@ -1,246 +0,0 @@
|
|
1 |
-
import errno
|
2 |
-
import itertools
|
3 |
-
import logging
|
4 |
-
import os.path
|
5 |
-
import tempfile
|
6 |
-
from contextlib import ExitStack, contextmanager
|
7 |
-
from typing import Any, Dict, Generator, Optional, TypeVar, Union
|
8 |
-
|
9 |
-
from pip._internal.utils.misc import enum, rmtree
|
10 |
-
|
11 |
-
logger = logging.getLogger(__name__)
|
12 |
-
|
13 |
-
_T = TypeVar("_T", bound="TempDirectory")
|
14 |
-
|
15 |
-
|
16 |
-
# Kinds of temporary directories. Only needed for ones that are
|
17 |
-
# globally-managed.
|
18 |
-
tempdir_kinds = enum(
|
19 |
-
BUILD_ENV="build-env",
|
20 |
-
EPHEM_WHEEL_CACHE="ephem-wheel-cache",
|
21 |
-
REQ_BUILD="req-build",
|
22 |
-
)
|
23 |
-
|
24 |
-
|
25 |
-
_tempdir_manager: Optional[ExitStack] = None
|
26 |
-
|
27 |
-
|
28 |
-
@contextmanager
|
29 |
-
def global_tempdir_manager() -> Generator[None, None, None]:
|
30 |
-
global _tempdir_manager
|
31 |
-
with ExitStack() as stack:
|
32 |
-
old_tempdir_manager, _tempdir_manager = _tempdir_manager, stack
|
33 |
-
try:
|
34 |
-
yield
|
35 |
-
finally:
|
36 |
-
_tempdir_manager = old_tempdir_manager
|
37 |
-
|
38 |
-
|
39 |
-
class TempDirectoryTypeRegistry:
|
40 |
-
"""Manages temp directory behavior"""
|
41 |
-
|
42 |
-
def __init__(self) -> None:
|
43 |
-
self._should_delete: Dict[str, bool] = {}
|
44 |
-
|
45 |
-
def set_delete(self, kind: str, value: bool) -> None:
|
46 |
-
"""Indicate whether a TempDirectory of the given kind should be
|
47 |
-
auto-deleted.
|
48 |
-
"""
|
49 |
-
self._should_delete[kind] = value
|
50 |
-
|
51 |
-
def get_delete(self, kind: str) -> bool:
|
52 |
-
"""Get configured auto-delete flag for a given TempDirectory type,
|
53 |
-
default True.
|
54 |
-
"""
|
55 |
-
return self._should_delete.get(kind, True)
|
56 |
-
|
57 |
-
|
58 |
-
_tempdir_registry: Optional[TempDirectoryTypeRegistry] = None
|
59 |
-
|
60 |
-
|
61 |
-
@contextmanager
|
62 |
-
def tempdir_registry() -> Generator[TempDirectoryTypeRegistry, None, None]:
|
63 |
-
"""Provides a scoped global tempdir registry that can be used to dictate
|
64 |
-
whether directories should be deleted.
|
65 |
-
"""
|
66 |
-
global _tempdir_registry
|
67 |
-
old_tempdir_registry = _tempdir_registry
|
68 |
-
_tempdir_registry = TempDirectoryTypeRegistry()
|
69 |
-
try:
|
70 |
-
yield _tempdir_registry
|
71 |
-
finally:
|
72 |
-
_tempdir_registry = old_tempdir_registry
|
73 |
-
|
74 |
-
|
75 |
-
class _Default:
|
76 |
-
pass
|
77 |
-
|
78 |
-
|
79 |
-
_default = _Default()
|
80 |
-
|
81 |
-
|
82 |
-
class TempDirectory:
|
83 |
-
"""Helper class that owns and cleans up a temporary directory.
|
84 |
-
|
85 |
-
This class can be used as a context manager or as an OO representation of a
|
86 |
-
temporary directory.
|
87 |
-
|
88 |
-
Attributes:
|
89 |
-
path
|
90 |
-
Location to the created temporary directory
|
91 |
-
delete
|
92 |
-
Whether the directory should be deleted when exiting
|
93 |
-
(when used as a contextmanager)
|
94 |
-
|
95 |
-
Methods:
|
96 |
-
cleanup()
|
97 |
-
Deletes the temporary directory
|
98 |
-
|
99 |
-
When used as a context manager, if the delete attribute is True, on
|
100 |
-
exiting the context the temporary directory is deleted.
|
101 |
-
"""
|
102 |
-
|
103 |
-
def __init__(
|
104 |
-
self,
|
105 |
-
path: Optional[str] = None,
|
106 |
-
delete: Union[bool, None, _Default] = _default,
|
107 |
-
kind: str = "temp",
|
108 |
-
globally_managed: bool = False,
|
109 |
-
):
|
110 |
-
super().__init__()
|
111 |
-
|
112 |
-
if delete is _default:
|
113 |
-
if path is not None:
|
114 |
-
# If we were given an explicit directory, resolve delete option
|
115 |
-
# now.
|
116 |
-
delete = False
|
117 |
-
else:
|
118 |
-
# Otherwise, we wait until cleanup and see what
|
119 |
-
# tempdir_registry says.
|
120 |
-
delete = None
|
121 |
-
|
122 |
-
# The only time we specify path is in for editables where it
|
123 |
-
# is the value of the --src option.
|
124 |
-
if path is None:
|
125 |
-
path = self._create(kind)
|
126 |
-
|
127 |
-
self._path = path
|
128 |
-
self._deleted = False
|
129 |
-
self.delete = delete
|
130 |
-
self.kind = kind
|
131 |
-
|
132 |
-
if globally_managed:
|
133 |
-
assert _tempdir_manager is not None
|
134 |
-
_tempdir_manager.enter_context(self)
|
135 |
-
|
136 |
-
@property
|
137 |
-
def path(self) -> str:
|
138 |
-
assert not self._deleted, f"Attempted to access deleted path: {self._path}"
|
139 |
-
return self._path
|
140 |
-
|
141 |
-
def __repr__(self) -> str:
|
142 |
-
return f"<{self.__class__.__name__} {self.path!r}>"
|
143 |
-
|
144 |
-
def __enter__(self: _T) -> _T:
|
145 |
-
return self
|
146 |
-
|
147 |
-
def __exit__(self, exc: Any, value: Any, tb: Any) -> None:
|
148 |
-
if self.delete is not None:
|
149 |
-
delete = self.delete
|
150 |
-
elif _tempdir_registry:
|
151 |
-
delete = _tempdir_registry.get_delete(self.kind)
|
152 |
-
else:
|
153 |
-
delete = True
|
154 |
-
|
155 |
-
if delete:
|
156 |
-
self.cleanup()
|
157 |
-
|
158 |
-
def _create(self, kind: str) -> str:
|
159 |
-
"""Create a temporary directory and store its path in self.path"""
|
160 |
-
# We realpath here because some systems have their default tmpdir
|
161 |
-
# symlinked to another directory. This tends to confuse build
|
162 |
-
# scripts, so we canonicalize the path by traversing potential
|
163 |
-
# symlinks here.
|
164 |
-
path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))
|
165 |
-
logger.debug("Created temporary directory: %s", path)
|
166 |
-
return path
|
167 |
-
|
168 |
-
def cleanup(self) -> None:
|
169 |
-
"""Remove the temporary directory created and reset state"""
|
170 |
-
self._deleted = True
|
171 |
-
if not os.path.exists(self._path):
|
172 |
-
return
|
173 |
-
rmtree(self._path)
|
174 |
-
|
175 |
-
|
176 |
-
class AdjacentTempDirectory(TempDirectory):
|
177 |
-
"""Helper class that creates a temporary directory adjacent to a real one.
|
178 |
-
|
179 |
-
Attributes:
|
180 |
-
original
|
181 |
-
The original directory to create a temp directory for.
|
182 |
-
path
|
183 |
-
After calling create() or entering, contains the full
|
184 |
-
path to the temporary directory.
|
185 |
-
delete
|
186 |
-
Whether the directory should be deleted when exiting
|
187 |
-
(when used as a contextmanager)
|
188 |
-
|
189 |
-
"""
|
190 |
-
|
191 |
-
# The characters that may be used to name the temp directory
|
192 |
-
# We always prepend a ~ and then rotate through these until
|
193 |
-
# a usable name is found.
|
194 |
-
# pkg_resources raises a different error for .dist-info folder
|
195 |
-
# with leading '-' and invalid metadata
|
196 |
-
LEADING_CHARS = "-~.=%0123456789"
|
197 |
-
|
198 |
-
def __init__(self, original: str, delete: Optional[bool] = None) -> None:
|
199 |
-
self.original = original.rstrip("/\\")
|
200 |
-
super().__init__(delete=delete)
|
201 |
-
|
202 |
-
@classmethod
|
203 |
-
def _generate_names(cls, name: str) -> Generator[str, None, None]:
|
204 |
-
"""Generates a series of temporary names.
|
205 |
-
|
206 |
-
The algorithm replaces the leading characters in the name
|
207 |
-
with ones that are valid filesystem characters, but are not
|
208 |
-
valid package names (for both Python and pip definitions of
|
209 |
-
package).
|
210 |
-
"""
|
211 |
-
for i in range(1, len(name)):
|
212 |
-
for candidate in itertools.combinations_with_replacement(
|
213 |
-
cls.LEADING_CHARS, i - 1
|
214 |
-
):
|
215 |
-
new_name = "~" + "".join(candidate) + name[i:]
|
216 |
-
if new_name != name:
|
217 |
-
yield new_name
|
218 |
-
|
219 |
-
# If we make it this far, we will have to make a longer name
|
220 |
-
for i in range(len(cls.LEADING_CHARS)):
|
221 |
-
for candidate in itertools.combinations_with_replacement(
|
222 |
-
cls.LEADING_CHARS, i
|
223 |
-
):
|
224 |
-
new_name = "~" + "".join(candidate) + name
|
225 |
-
if new_name != name:
|
226 |
-
yield new_name
|
227 |
-
|
228 |
-
def _create(self, kind: str) -> str:
|
229 |
-
root, name = os.path.split(self.original)
|
230 |
-
for candidate in self._generate_names(name):
|
231 |
-
path = os.path.join(root, candidate)
|
232 |
-
try:
|
233 |
-
os.mkdir(path)
|
234 |
-
except OSError as ex:
|
235 |
-
# Continue if the name exists already
|
236 |
-
if ex.errno != errno.EEXIST:
|
237 |
-
raise
|
238 |
-
else:
|
239 |
-
path = os.path.realpath(path)
|
240 |
-
break
|
241 |
-
else:
|
242 |
-
# Final fallback on the default behavior.
|
243 |
-
path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))
|
244 |
-
|
245 |
-
logger.debug("Created temporary directory: %s", path)
|
246 |
-
return path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/download.py
DELETED
@@ -1,790 +0,0 @@
|
|
1 |
-
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
4 |
-
# may not use this file except in compliance with the License. A copy of
|
5 |
-
# the License is located at
|
6 |
-
#
|
7 |
-
# http://aws.amazon.com/apache2.0/
|
8 |
-
#
|
9 |
-
# or in the "license" file accompanying this file. This file is
|
10 |
-
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
11 |
-
# ANY KIND, either express or implied. See the License for the specific
|
12 |
-
# language governing permissions and limitations under the License.
|
13 |
-
import heapq
|
14 |
-
import logging
|
15 |
-
import threading
|
16 |
-
|
17 |
-
from s3transfer.compat import seekable
|
18 |
-
from s3transfer.exceptions import RetriesExceededError
|
19 |
-
from s3transfer.futures import IN_MEMORY_DOWNLOAD_TAG
|
20 |
-
from s3transfer.tasks import SubmissionTask, Task
|
21 |
-
from s3transfer.utils import (
|
22 |
-
S3_RETRYABLE_DOWNLOAD_ERRORS,
|
23 |
-
CountCallbackInvoker,
|
24 |
-
DeferredOpenFile,
|
25 |
-
FunctionContainer,
|
26 |
-
StreamReaderProgress,
|
27 |
-
calculate_num_parts,
|
28 |
-
calculate_range_parameter,
|
29 |
-
get_callbacks,
|
30 |
-
invoke_progress_callbacks,
|
31 |
-
)
|
32 |
-
|
33 |
-
logger = logging.getLogger(__name__)
|
34 |
-
|
35 |
-
|
36 |
-
class DownloadOutputManager:
|
37 |
-
"""Base manager class for handling various types of files for downloads
|
38 |
-
|
39 |
-
This class is typically used for the DownloadSubmissionTask class to help
|
40 |
-
determine the following:
|
41 |
-
|
42 |
-
* Provides the fileobj to write to downloads to
|
43 |
-
* Get a task to complete once everything downloaded has been written
|
44 |
-
|
45 |
-
The answers/implementations differ for the various types of file outputs
|
46 |
-
that may be accepted. All implementations must subclass and override
|
47 |
-
public methods from this class.
|
48 |
-
"""
|
49 |
-
|
50 |
-
def __init__(self, osutil, transfer_coordinator, io_executor):
|
51 |
-
self._osutil = osutil
|
52 |
-
self._transfer_coordinator = transfer_coordinator
|
53 |
-
self._io_executor = io_executor
|
54 |
-
|
55 |
-
@classmethod
|
56 |
-
def is_compatible(cls, download_target, osutil):
|
57 |
-
"""Determines if the target for the download is compatible with manager
|
58 |
-
|
59 |
-
:param download_target: The target for which the upload will write
|
60 |
-
data to.
|
61 |
-
|
62 |
-
:param osutil: The os utility to be used for the transfer
|
63 |
-
|
64 |
-
:returns: True if the manager can handle the type of target specified
|
65 |
-
otherwise returns False.
|
66 |
-
"""
|
67 |
-
raise NotImplementedError('must implement is_compatible()')
|
68 |
-
|
69 |
-
def get_download_task_tag(self):
|
70 |
-
"""Get the tag (if any) to associate all GetObjectTasks
|
71 |
-
|
72 |
-
:rtype: s3transfer.futures.TaskTag
|
73 |
-
:returns: The tag to associate all GetObjectTasks with
|
74 |
-
"""
|
75 |
-
return None
|
76 |
-
|
77 |
-
def get_fileobj_for_io_writes(self, transfer_future):
|
78 |
-
"""Get file-like object to use for io writes in the io executor
|
79 |
-
|
80 |
-
:type transfer_future: s3transfer.futures.TransferFuture
|
81 |
-
:param transfer_future: The future associated with upload request
|
82 |
-
|
83 |
-
returns: A file-like object to write to
|
84 |
-
"""
|
85 |
-
raise NotImplementedError('must implement get_fileobj_for_io_writes()')
|
86 |
-
|
87 |
-
def queue_file_io_task(self, fileobj, data, offset):
|
88 |
-
"""Queue IO write for submission to the IO executor.
|
89 |
-
|
90 |
-
This method accepts an IO executor and information about the
|
91 |
-
downloaded data, and handles submitting this to the IO executor.
|
92 |
-
|
93 |
-
This method may defer submission to the IO executor if necessary.
|
94 |
-
|
95 |
-
"""
|
96 |
-
self._transfer_coordinator.submit(
|
97 |
-
self._io_executor, self.get_io_write_task(fileobj, data, offset)
|
98 |
-
)
|
99 |
-
|
100 |
-
def get_io_write_task(self, fileobj, data, offset):
|
101 |
-
"""Get an IO write task for the requested set of data
|
102 |
-
|
103 |
-
This task can be ran immediately or be submitted to the IO executor
|
104 |
-
for it to run.
|
105 |
-
|
106 |
-
:type fileobj: file-like object
|
107 |
-
:param fileobj: The file-like object to write to
|
108 |
-
|
109 |
-
:type data: bytes
|
110 |
-
:param data: The data to write out
|
111 |
-
|
112 |
-
:type offset: integer
|
113 |
-
:param offset: The offset to write the data to in the file-like object
|
114 |
-
|
115 |
-
:returns: An IO task to be used to write data to a file-like object
|
116 |
-
"""
|
117 |
-
return IOWriteTask(
|
118 |
-
self._transfer_coordinator,
|
119 |
-
main_kwargs={
|
120 |
-
'fileobj': fileobj,
|
121 |
-
'data': data,
|
122 |
-
'offset': offset,
|
123 |
-
},
|
124 |
-
)
|
125 |
-
|
126 |
-
def get_final_io_task(self):
|
127 |
-
"""Get the final io task to complete the download
|
128 |
-
|
129 |
-
This is needed because based on the architecture of the TransferManager
|
130 |
-
the final tasks will be sent to the IO executor, but the executor
|
131 |
-
needs a final task for it to signal that the transfer is done and
|
132 |
-
all done callbacks can be run.
|
133 |
-
|
134 |
-
:rtype: s3transfer.tasks.Task
|
135 |
-
:returns: A final task to completed in the io executor
|
136 |
-
"""
|
137 |
-
raise NotImplementedError('must implement get_final_io_task()')
|
138 |
-
|
139 |
-
def _get_fileobj_from_filename(self, filename):
|
140 |
-
f = DeferredOpenFile(
|
141 |
-
filename, mode='wb', open_function=self._osutil.open
|
142 |
-
)
|
143 |
-
# Make sure the file gets closed and we remove the temporary file
|
144 |
-
# if anything goes wrong during the process.
|
145 |
-
self._transfer_coordinator.add_failure_cleanup(f.close)
|
146 |
-
return f
|
147 |
-
|
148 |
-
|
149 |
-
class DownloadFilenameOutputManager(DownloadOutputManager):
|
150 |
-
def __init__(self, osutil, transfer_coordinator, io_executor):
|
151 |
-
super().__init__(osutil, transfer_coordinator, io_executor)
|
152 |
-
self._final_filename = None
|
153 |
-
self._temp_filename = None
|
154 |
-
self._temp_fileobj = None
|
155 |
-
|
156 |
-
@classmethod
|
157 |
-
def is_compatible(cls, download_target, osutil):
|
158 |
-
return isinstance(download_target, str)
|
159 |
-
|
160 |
-
def get_fileobj_for_io_writes(self, transfer_future):
|
161 |
-
fileobj = transfer_future.meta.call_args.fileobj
|
162 |
-
self._final_filename = fileobj
|
163 |
-
self._temp_filename = self._osutil.get_temp_filename(fileobj)
|
164 |
-
self._temp_fileobj = self._get_temp_fileobj()
|
165 |
-
return self._temp_fileobj
|
166 |
-
|
167 |
-
def get_final_io_task(self):
|
168 |
-
# A task to rename the file from the temporary file to its final
|
169 |
-
# location is needed. This should be the last task needed to complete
|
170 |
-
# the download.
|
171 |
-
return IORenameFileTask(
|
172 |
-
transfer_coordinator=self._transfer_coordinator,
|
173 |
-
main_kwargs={
|
174 |
-
'fileobj': self._temp_fileobj,
|
175 |
-
'final_filename': self._final_filename,
|
176 |
-
'osutil': self._osutil,
|
177 |
-
},
|
178 |
-
is_final=True,
|
179 |
-
)
|
180 |
-
|
181 |
-
def _get_temp_fileobj(self):
|
182 |
-
f = self._get_fileobj_from_filename(self._temp_filename)
|
183 |
-
self._transfer_coordinator.add_failure_cleanup(
|
184 |
-
self._osutil.remove_file, self._temp_filename
|
185 |
-
)
|
186 |
-
return f
|
187 |
-
|
188 |
-
|
189 |
-
class DownloadSeekableOutputManager(DownloadOutputManager):
|
190 |
-
@classmethod
|
191 |
-
def is_compatible(cls, download_target, osutil):
|
192 |
-
return seekable(download_target)
|
193 |
-
|
194 |
-
def get_fileobj_for_io_writes(self, transfer_future):
|
195 |
-
# Return the fileobj provided to the future.
|
196 |
-
return transfer_future.meta.call_args.fileobj
|
197 |
-
|
198 |
-
def get_final_io_task(self):
|
199 |
-
# This task will serve the purpose of signaling when all of the io
|
200 |
-
# writes have finished so done callbacks can be called.
|
201 |
-
return CompleteDownloadNOOPTask(
|
202 |
-
transfer_coordinator=self._transfer_coordinator
|
203 |
-
)
|
204 |
-
|
205 |
-
|
206 |
-
class DownloadNonSeekableOutputManager(DownloadOutputManager):
|
207 |
-
def __init__(
|
208 |
-
self, osutil, transfer_coordinator, io_executor, defer_queue=None
|
209 |
-
):
|
210 |
-
super().__init__(osutil, transfer_coordinator, io_executor)
|
211 |
-
if defer_queue is None:
|
212 |
-
defer_queue = DeferQueue()
|
213 |
-
self._defer_queue = defer_queue
|
214 |
-
self._io_submit_lock = threading.Lock()
|
215 |
-
|
216 |
-
@classmethod
|
217 |
-
def is_compatible(cls, download_target, osutil):
|
218 |
-
return hasattr(download_target, 'write')
|
219 |
-
|
220 |
-
def get_download_task_tag(self):
|
221 |
-
return IN_MEMORY_DOWNLOAD_TAG
|
222 |
-
|
223 |
-
def get_fileobj_for_io_writes(self, transfer_future):
|
224 |
-
return transfer_future.meta.call_args.fileobj
|
225 |
-
|
226 |
-
def get_final_io_task(self):
|
227 |
-
return CompleteDownloadNOOPTask(
|
228 |
-
transfer_coordinator=self._transfer_coordinator
|
229 |
-
)
|
230 |
-
|
231 |
-
def queue_file_io_task(self, fileobj, data, offset):
|
232 |
-
with self._io_submit_lock:
|
233 |
-
writes = self._defer_queue.request_writes(offset, data)
|
234 |
-
for write in writes:
|
235 |
-
data = write['data']
|
236 |
-
logger.debug(
|
237 |
-
"Queueing IO offset %s for fileobj: %s",
|
238 |
-
write['offset'],
|
239 |
-
fileobj,
|
240 |
-
)
|
241 |
-
super().queue_file_io_task(fileobj, data, offset)
|
242 |
-
|
243 |
-
def get_io_write_task(self, fileobj, data, offset):
|
244 |
-
return IOStreamingWriteTask(
|
245 |
-
self._transfer_coordinator,
|
246 |
-
main_kwargs={
|
247 |
-
'fileobj': fileobj,
|
248 |
-
'data': data,
|
249 |
-
},
|
250 |
-
)
|
251 |
-
|
252 |
-
|
253 |
-
class DownloadSpecialFilenameOutputManager(DownloadNonSeekableOutputManager):
|
254 |
-
def __init__(
|
255 |
-
self, osutil, transfer_coordinator, io_executor, defer_queue=None
|
256 |
-
):
|
257 |
-
super().__init__(
|
258 |
-
osutil, transfer_coordinator, io_executor, defer_queue
|
259 |
-
)
|
260 |
-
self._fileobj = None
|
261 |
-
|
262 |
-
@classmethod
|
263 |
-
def is_compatible(cls, download_target, osutil):
|
264 |
-
return isinstance(download_target, str) and osutil.is_special_file(
|
265 |
-
download_target
|
266 |
-
)
|
267 |
-
|
268 |
-
def get_fileobj_for_io_writes(self, transfer_future):
|
269 |
-
filename = transfer_future.meta.call_args.fileobj
|
270 |
-
self._fileobj = self._get_fileobj_from_filename(filename)
|
271 |
-
return self._fileobj
|
272 |
-
|
273 |
-
def get_final_io_task(self):
|
274 |
-
# Make sure the file gets closed once the transfer is done.
|
275 |
-
return IOCloseTask(
|
276 |
-
transfer_coordinator=self._transfer_coordinator,
|
277 |
-
is_final=True,
|
278 |
-
main_kwargs={'fileobj': self._fileobj},
|
279 |
-
)
|
280 |
-
|
281 |
-
|
282 |
-
class DownloadSubmissionTask(SubmissionTask):
|
283 |
-
"""Task for submitting tasks to execute a download"""
|
284 |
-
|
285 |
-
def _get_download_output_manager_cls(self, transfer_future, osutil):
|
286 |
-
"""Retrieves a class for managing output for a download
|
287 |
-
|
288 |
-
:type transfer_future: s3transfer.futures.TransferFuture
|
289 |
-
:param transfer_future: The transfer future for the request
|
290 |
-
|
291 |
-
:type osutil: s3transfer.utils.OSUtils
|
292 |
-
:param osutil: The os utility associated to the transfer
|
293 |
-
|
294 |
-
:rtype: class of DownloadOutputManager
|
295 |
-
:returns: The appropriate class to use for managing a specific type of
|
296 |
-
input for downloads.
|
297 |
-
"""
|
298 |
-
download_manager_resolver_chain = [
|
299 |
-
DownloadSpecialFilenameOutputManager,
|
300 |
-
DownloadFilenameOutputManager,
|
301 |
-
DownloadSeekableOutputManager,
|
302 |
-
DownloadNonSeekableOutputManager,
|
303 |
-
]
|
304 |
-
|
305 |
-
fileobj = transfer_future.meta.call_args.fileobj
|
306 |
-
for download_manager_cls in download_manager_resolver_chain:
|
307 |
-
if download_manager_cls.is_compatible(fileobj, osutil):
|
308 |
-
return download_manager_cls
|
309 |
-
raise RuntimeError(
|
310 |
-
'Output {} of type: {} is not supported.'.format(
|
311 |
-
fileobj, type(fileobj)
|
312 |
-
)
|
313 |
-
)
|
314 |
-
|
315 |
-
def _submit(
|
316 |
-
self,
|
317 |
-
client,
|
318 |
-
config,
|
319 |
-
osutil,
|
320 |
-
request_executor,
|
321 |
-
io_executor,
|
322 |
-
transfer_future,
|
323 |
-
bandwidth_limiter=None,
|
324 |
-
):
|
325 |
-
"""
|
326 |
-
:param client: The client associated with the transfer manager
|
327 |
-
|
328 |
-
:type config: s3transfer.manager.TransferConfig
|
329 |
-
:param config: The transfer config associated with the transfer
|
330 |
-
manager
|
331 |
-
|
332 |
-
:type osutil: s3transfer.utils.OSUtil
|
333 |
-
:param osutil: The os utility associated to the transfer manager
|
334 |
-
|
335 |
-
:type request_executor: s3transfer.futures.BoundedExecutor
|
336 |
-
:param request_executor: The request executor associated with the
|
337 |
-
transfer manager
|
338 |
-
|
339 |
-
:type io_executor: s3transfer.futures.BoundedExecutor
|
340 |
-
:param io_executor: The io executor associated with the
|
341 |
-
transfer manager
|
342 |
-
|
343 |
-
:type transfer_future: s3transfer.futures.TransferFuture
|
344 |
-
:param transfer_future: The transfer future associated with the
|
345 |
-
transfer request that tasks are being submitted for
|
346 |
-
|
347 |
-
:type bandwidth_limiter: s3transfer.bandwidth.BandwidthLimiter
|
348 |
-
:param bandwidth_limiter: The bandwidth limiter to use when
|
349 |
-
downloading streams
|
350 |
-
"""
|
351 |
-
if transfer_future.meta.size is None:
|
352 |
-
# If a size was not provided figure out the size for the
|
353 |
-
# user.
|
354 |
-
response = client.head_object(
|
355 |
-
Bucket=transfer_future.meta.call_args.bucket,
|
356 |
-
Key=transfer_future.meta.call_args.key,
|
357 |
-
**transfer_future.meta.call_args.extra_args,
|
358 |
-
)
|
359 |
-
transfer_future.meta.provide_transfer_size(
|
360 |
-
response['ContentLength']
|
361 |
-
)
|
362 |
-
|
363 |
-
download_output_manager = self._get_download_output_manager_cls(
|
364 |
-
transfer_future, osutil
|
365 |
-
)(osutil, self._transfer_coordinator, io_executor)
|
366 |
-
|
367 |
-
# If it is greater than threshold do a ranged download, otherwise
|
368 |
-
# do a regular GetObject download.
|
369 |
-
if transfer_future.meta.size < config.multipart_threshold:
|
370 |
-
self._submit_download_request(
|
371 |
-
client,
|
372 |
-
config,
|
373 |
-
osutil,
|
374 |
-
request_executor,
|
375 |
-
io_executor,
|
376 |
-
download_output_manager,
|
377 |
-
transfer_future,
|
378 |
-
bandwidth_limiter,
|
379 |
-
)
|
380 |
-
else:
|
381 |
-
self._submit_ranged_download_request(
|
382 |
-
client,
|
383 |
-
config,
|
384 |
-
osutil,
|
385 |
-
request_executor,
|
386 |
-
io_executor,
|
387 |
-
download_output_manager,
|
388 |
-
transfer_future,
|
389 |
-
bandwidth_limiter,
|
390 |
-
)
|
391 |
-
|
392 |
-
def _submit_download_request(
|
393 |
-
self,
|
394 |
-
client,
|
395 |
-
config,
|
396 |
-
osutil,
|
397 |
-
request_executor,
|
398 |
-
io_executor,
|
399 |
-
download_output_manager,
|
400 |
-
transfer_future,
|
401 |
-
bandwidth_limiter,
|
402 |
-
):
|
403 |
-
call_args = transfer_future.meta.call_args
|
404 |
-
|
405 |
-
# Get a handle to the file that will be used for writing downloaded
|
406 |
-
# contents
|
407 |
-
fileobj = download_output_manager.get_fileobj_for_io_writes(
|
408 |
-
transfer_future
|
409 |
-
)
|
410 |
-
|
411 |
-
# Get the needed callbacks for the task
|
412 |
-
progress_callbacks = get_callbacks(transfer_future, 'progress')
|
413 |
-
|
414 |
-
# Get any associated tags for the get object task.
|
415 |
-
get_object_tag = download_output_manager.get_download_task_tag()
|
416 |
-
|
417 |
-
# Get the final io task to run once the download is complete.
|
418 |
-
final_task = download_output_manager.get_final_io_task()
|
419 |
-
|
420 |
-
# Submit the task to download the object.
|
421 |
-
self._transfer_coordinator.submit(
|
422 |
-
request_executor,
|
423 |
-
ImmediatelyWriteIOGetObjectTask(
|
424 |
-
transfer_coordinator=self._transfer_coordinator,
|
425 |
-
main_kwargs={
|
426 |
-
'client': client,
|
427 |
-
'bucket': call_args.bucket,
|
428 |
-
'key': call_args.key,
|
429 |
-
'fileobj': fileobj,
|
430 |
-
'extra_args': call_args.extra_args,
|
431 |
-
'callbacks': progress_callbacks,
|
432 |
-
'max_attempts': config.num_download_attempts,
|
433 |
-
'download_output_manager': download_output_manager,
|
434 |
-
'io_chunksize': config.io_chunksize,
|
435 |
-
'bandwidth_limiter': bandwidth_limiter,
|
436 |
-
},
|
437 |
-
done_callbacks=[final_task],
|
438 |
-
),
|
439 |
-
tag=get_object_tag,
|
440 |
-
)
|
441 |
-
|
442 |
-
def _submit_ranged_download_request(
|
443 |
-
self,
|
444 |
-
client,
|
445 |
-
config,
|
446 |
-
osutil,
|
447 |
-
request_executor,
|
448 |
-
io_executor,
|
449 |
-
download_output_manager,
|
450 |
-
transfer_future,
|
451 |
-
bandwidth_limiter,
|
452 |
-
):
|
453 |
-
call_args = transfer_future.meta.call_args
|
454 |
-
|
455 |
-
# Get the needed progress callbacks for the task
|
456 |
-
progress_callbacks = get_callbacks(transfer_future, 'progress')
|
457 |
-
|
458 |
-
# Get a handle to the file that will be used for writing downloaded
|
459 |
-
# contents
|
460 |
-
fileobj = download_output_manager.get_fileobj_for_io_writes(
|
461 |
-
transfer_future
|
462 |
-
)
|
463 |
-
|
464 |
-
# Determine the number of parts
|
465 |
-
part_size = config.multipart_chunksize
|
466 |
-
num_parts = calculate_num_parts(transfer_future.meta.size, part_size)
|
467 |
-
|
468 |
-
# Get any associated tags for the get object task.
|
469 |
-
get_object_tag = download_output_manager.get_download_task_tag()
|
470 |
-
|
471 |
-
# Callback invoker to submit the final io task once all downloads
|
472 |
-
# are complete.
|
473 |
-
finalize_download_invoker = CountCallbackInvoker(
|
474 |
-
self._get_final_io_task_submission_callback(
|
475 |
-
download_output_manager, io_executor
|
476 |
-
)
|
477 |
-
)
|
478 |
-
for i in range(num_parts):
|
479 |
-
# Calculate the range parameter
|
480 |
-
range_parameter = calculate_range_parameter(
|
481 |
-
part_size, i, num_parts
|
482 |
-
)
|
483 |
-
|
484 |
-
# Inject the Range parameter to the parameters to be passed in
|
485 |
-
# as extra args
|
486 |
-
extra_args = {'Range': range_parameter}
|
487 |
-
extra_args.update(call_args.extra_args)
|
488 |
-
finalize_download_invoker.increment()
|
489 |
-
# Submit the ranged downloads
|
490 |
-
self._transfer_coordinator.submit(
|
491 |
-
request_executor,
|
492 |
-
GetObjectTask(
|
493 |
-
transfer_coordinator=self._transfer_coordinator,
|
494 |
-
main_kwargs={
|
495 |
-
'client': client,
|
496 |
-
'bucket': call_args.bucket,
|
497 |
-
'key': call_args.key,
|
498 |
-
'fileobj': fileobj,
|
499 |
-
'extra_args': extra_args,
|
500 |
-
'callbacks': progress_callbacks,
|
501 |
-
'max_attempts': config.num_download_attempts,
|
502 |
-
'start_index': i * part_size,
|
503 |
-
'download_output_manager': download_output_manager,
|
504 |
-
'io_chunksize': config.io_chunksize,
|
505 |
-
'bandwidth_limiter': bandwidth_limiter,
|
506 |
-
},
|
507 |
-
done_callbacks=[finalize_download_invoker.decrement],
|
508 |
-
),
|
509 |
-
tag=get_object_tag,
|
510 |
-
)
|
511 |
-
finalize_download_invoker.finalize()
|
512 |
-
|
513 |
-
def _get_final_io_task_submission_callback(
|
514 |
-
self, download_manager, io_executor
|
515 |
-
):
|
516 |
-
final_task = download_manager.get_final_io_task()
|
517 |
-
return FunctionContainer(
|
518 |
-
self._transfer_coordinator.submit, io_executor, final_task
|
519 |
-
)
|
520 |
-
|
521 |
-
def _calculate_range_param(self, part_size, part_index, num_parts):
|
522 |
-
# Used to calculate the Range parameter
|
523 |
-
start_range = part_index * part_size
|
524 |
-
if part_index == num_parts - 1:
|
525 |
-
end_range = ''
|
526 |
-
else:
|
527 |
-
end_range = start_range + part_size - 1
|
528 |
-
range_param = f'bytes={start_range}-{end_range}'
|
529 |
-
return range_param
|
530 |
-
|
531 |
-
|
532 |
-
class GetObjectTask(Task):
|
533 |
-
def _main(
|
534 |
-
self,
|
535 |
-
client,
|
536 |
-
bucket,
|
537 |
-
key,
|
538 |
-
fileobj,
|
539 |
-
extra_args,
|
540 |
-
callbacks,
|
541 |
-
max_attempts,
|
542 |
-
download_output_manager,
|
543 |
-
io_chunksize,
|
544 |
-
start_index=0,
|
545 |
-
bandwidth_limiter=None,
|
546 |
-
):
|
547 |
-
"""Downloads an object and places content into io queue
|
548 |
-
|
549 |
-
:param client: The client to use when calling GetObject
|
550 |
-
:param bucket: The bucket to download from
|
551 |
-
:param key: The key to download from
|
552 |
-
:param fileobj: The file handle to write content to
|
553 |
-
:param exta_args: Any extra arguments to include in GetObject request
|
554 |
-
:param callbacks: List of progress callbacks to invoke on download
|
555 |
-
:param max_attempts: The number of retries to do when downloading
|
556 |
-
:param download_output_manager: The download output manager associated
|
557 |
-
with the current download.
|
558 |
-
:param io_chunksize: The size of each io chunk to read from the
|
559 |
-
download stream and queue in the io queue.
|
560 |
-
:param start_index: The location in the file to start writing the
|
561 |
-
content of the key to.
|
562 |
-
:param bandwidth_limiter: The bandwidth limiter to use when throttling
|
563 |
-
the downloading of data in streams.
|
564 |
-
"""
|
565 |
-
last_exception = None
|
566 |
-
for i in range(max_attempts):
|
567 |
-
try:
|
568 |
-
current_index = start_index
|
569 |
-
response = client.get_object(
|
570 |
-
Bucket=bucket, Key=key, **extra_args
|
571 |
-
)
|
572 |
-
streaming_body = StreamReaderProgress(
|
573 |
-
response['Body'], callbacks
|
574 |
-
)
|
575 |
-
if bandwidth_limiter:
|
576 |
-
streaming_body = (
|
577 |
-
bandwidth_limiter.get_bandwith_limited_stream(
|
578 |
-
streaming_body, self._transfer_coordinator
|
579 |
-
)
|
580 |
-
)
|
581 |
-
|
582 |
-
chunks = DownloadChunkIterator(streaming_body, io_chunksize)
|
583 |
-
for chunk in chunks:
|
584 |
-
# If the transfer is done because of a cancellation
|
585 |
-
# or error somewhere else, stop trying to submit more
|
586 |
-
# data to be written and break out of the download.
|
587 |
-
if not self._transfer_coordinator.done():
|
588 |
-
self._handle_io(
|
589 |
-
download_output_manager,
|
590 |
-
fileobj,
|
591 |
-
chunk,
|
592 |
-
current_index,
|
593 |
-
)
|
594 |
-
current_index += len(chunk)
|
595 |
-
else:
|
596 |
-
return
|
597 |
-
return
|
598 |
-
except S3_RETRYABLE_DOWNLOAD_ERRORS as e:
|
599 |
-
logger.debug(
|
600 |
-
"Retrying exception caught (%s), "
|
601 |
-
"retrying request, (attempt %s / %s)",
|
602 |
-
e,
|
603 |
-
i,
|
604 |
-
max_attempts,
|
605 |
-
exc_info=True,
|
606 |
-
)
|
607 |
-
last_exception = e
|
608 |
-
# Also invoke the progress callbacks to indicate that we
|
609 |
-
# are trying to download the stream again and all progress
|
610 |
-
# for this GetObject has been lost.
|
611 |
-
invoke_progress_callbacks(
|
612 |
-
callbacks, start_index - current_index
|
613 |
-
)
|
614 |
-
continue
|
615 |
-
raise RetriesExceededError(last_exception)
|
616 |
-
|
617 |
-
def _handle_io(self, download_output_manager, fileobj, chunk, index):
|
618 |
-
download_output_manager.queue_file_io_task(fileobj, chunk, index)
|
619 |
-
|
620 |
-
|
621 |
-
class ImmediatelyWriteIOGetObjectTask(GetObjectTask):
|
622 |
-
"""GetObjectTask that immediately writes to the provided file object
|
623 |
-
|
624 |
-
This is useful for downloads where it is known only one thread is
|
625 |
-
downloading the object so there is no reason to go through the
|
626 |
-
overhead of using an IO queue and executor.
|
627 |
-
"""
|
628 |
-
|
629 |
-
def _handle_io(self, download_output_manager, fileobj, chunk, index):
|
630 |
-
task = download_output_manager.get_io_write_task(fileobj, chunk, index)
|
631 |
-
task()
|
632 |
-
|
633 |
-
|
634 |
-
class IOWriteTask(Task):
|
635 |
-
def _main(self, fileobj, data, offset):
|
636 |
-
"""Pulls off an io queue to write contents to a file
|
637 |
-
|
638 |
-
:param fileobj: The file handle to write content to
|
639 |
-
:param data: The data to write
|
640 |
-
:param offset: The offset to write the data to.
|
641 |
-
"""
|
642 |
-
fileobj.seek(offset)
|
643 |
-
fileobj.write(data)
|
644 |
-
|
645 |
-
|
646 |
-
class IOStreamingWriteTask(Task):
|
647 |
-
"""Task for writing data to a non-seekable stream."""
|
648 |
-
|
649 |
-
def _main(self, fileobj, data):
|
650 |
-
"""Write data to a fileobj.
|
651 |
-
|
652 |
-
Data will be written directly to the fileobj without
|
653 |
-
any prior seeking.
|
654 |
-
|
655 |
-
:param fileobj: The fileobj to write content to
|
656 |
-
:param data: The data to write
|
657 |
-
|
658 |
-
"""
|
659 |
-
fileobj.write(data)
|
660 |
-
|
661 |
-
|
662 |
-
class IORenameFileTask(Task):
|
663 |
-
"""A task to rename a temporary file to its final filename
|
664 |
-
|
665 |
-
:param fileobj: The file handle that content was written to.
|
666 |
-
:param final_filename: The final name of the file to rename to
|
667 |
-
upon completion of writing the contents.
|
668 |
-
:param osutil: OS utility
|
669 |
-
"""
|
670 |
-
|
671 |
-
def _main(self, fileobj, final_filename, osutil):
|
672 |
-
fileobj.close()
|
673 |
-
osutil.rename_file(fileobj.name, final_filename)
|
674 |
-
|
675 |
-
|
676 |
-
class IOCloseTask(Task):
|
677 |
-
"""A task to close out a file once the download is complete.
|
678 |
-
|
679 |
-
:param fileobj: The fileobj to close.
|
680 |
-
"""
|
681 |
-
|
682 |
-
def _main(self, fileobj):
|
683 |
-
fileobj.close()
|
684 |
-
|
685 |
-
|
686 |
-
class CompleteDownloadNOOPTask(Task):
|
687 |
-
"""A NOOP task to serve as an indicator that the download is complete
|
688 |
-
|
689 |
-
Note that the default for is_final is set to True because this should
|
690 |
-
always be the last task.
|
691 |
-
"""
|
692 |
-
|
693 |
-
def __init__(
|
694 |
-
self,
|
695 |
-
transfer_coordinator,
|
696 |
-
main_kwargs=None,
|
697 |
-
pending_main_kwargs=None,
|
698 |
-
done_callbacks=None,
|
699 |
-
is_final=True,
|
700 |
-
):
|
701 |
-
super().__init__(
|
702 |
-
transfer_coordinator=transfer_coordinator,
|
703 |
-
main_kwargs=main_kwargs,
|
704 |
-
pending_main_kwargs=pending_main_kwargs,
|
705 |
-
done_callbacks=done_callbacks,
|
706 |
-
is_final=is_final,
|
707 |
-
)
|
708 |
-
|
709 |
-
def _main(self):
|
710 |
-
pass
|
711 |
-
|
712 |
-
|
713 |
-
class DownloadChunkIterator:
|
714 |
-
def __init__(self, body, chunksize):
|
715 |
-
"""Iterator to chunk out a downloaded S3 stream
|
716 |
-
|
717 |
-
:param body: A readable file-like object
|
718 |
-
:param chunksize: The amount to read each time
|
719 |
-
"""
|
720 |
-
self._body = body
|
721 |
-
self._chunksize = chunksize
|
722 |
-
self._num_reads = 0
|
723 |
-
|
724 |
-
def __iter__(self):
|
725 |
-
return self
|
726 |
-
|
727 |
-
def __next__(self):
|
728 |
-
chunk = self._body.read(self._chunksize)
|
729 |
-
self._num_reads += 1
|
730 |
-
if chunk:
|
731 |
-
return chunk
|
732 |
-
elif self._num_reads == 1:
|
733 |
-
# Even though the response may have not had any
|
734 |
-
# content, we still want to account for an empty object's
|
735 |
-
# existence so return the empty chunk for that initial
|
736 |
-
# read.
|
737 |
-
return chunk
|
738 |
-
raise StopIteration()
|
739 |
-
|
740 |
-
next = __next__
|
741 |
-
|
742 |
-
|
743 |
-
class DeferQueue:
|
744 |
-
"""IO queue that defers write requests until they are queued sequentially.
|
745 |
-
|
746 |
-
This class is used to track IO data for a *single* fileobj.
|
747 |
-
|
748 |
-
You can send data to this queue, and it will defer any IO write requests
|
749 |
-
until it has the next contiguous block available (starting at 0).
|
750 |
-
|
751 |
-
"""
|
752 |
-
|
753 |
-
def __init__(self):
|
754 |
-
self._writes = []
|
755 |
-
self._pending_offsets = set()
|
756 |
-
self._next_offset = 0
|
757 |
-
|
758 |
-
def request_writes(self, offset, data):
|
759 |
-
"""Request any available writes given new incoming data.
|
760 |
-
|
761 |
-
You call this method by providing new data along with the
|
762 |
-
offset associated with the data. If that new data unlocks
|
763 |
-
any contiguous writes that can now be submitted, this
|
764 |
-
method will return all applicable writes.
|
765 |
-
|
766 |
-
This is done with 1 method call so you don't have to
|
767 |
-
make two method calls (put(), get()) which acquires a lock
|
768 |
-
each method call.
|
769 |
-
|
770 |
-
"""
|
771 |
-
if offset < self._next_offset:
|
772 |
-
# This is a request for a write that we've already
|
773 |
-
# seen. This can happen in the event of a retry
|
774 |
-
# where if we retry at at offset N/2, we'll requeue
|
775 |
-
# offsets 0-N/2 again.
|
776 |
-
return []
|
777 |
-
writes = []
|
778 |
-
if offset in self._pending_offsets:
|
779 |
-
# We've already queued this offset so this request is
|
780 |
-
# a duplicate. In this case we should ignore
|
781 |
-
# this request and prefer what's already queued.
|
782 |
-
return []
|
783 |
-
heapq.heappush(self._writes, (offset, data))
|
784 |
-
self._pending_offsets.add(offset)
|
785 |
-
while self._writes and self._writes[0][0] == self._next_offset:
|
786 |
-
next_write = heapq.heappop(self._writes)
|
787 |
-
writes.append({'offset': next_write[0], 'data': next_write[1]})
|
788 |
-
self._pending_offsets.remove(next_write[0])
|
789 |
-
self._next_offset += len(next_write[1])
|
790 |
-
return writes
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/functional/argument.h
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
// Portions of this code are derived from
|
18 |
-
//
|
19 |
-
// Manjunath Kudlur's Carbon library
|
20 |
-
//
|
21 |
-
// and
|
22 |
-
//
|
23 |
-
// Based on Boost.Phoenix v1.2
|
24 |
-
// Copyright (c) 2001-2002 Joel de Guzman
|
25 |
-
|
26 |
-
#pragma once
|
27 |
-
|
28 |
-
#include <thrust/detail/config.h>
|
29 |
-
#include <thrust/tuple.h>
|
30 |
-
|
31 |
-
namespace thrust
|
32 |
-
{
|
33 |
-
namespace detail
|
34 |
-
{
|
35 |
-
namespace functional
|
36 |
-
{
|
37 |
-
|
38 |
-
template<unsigned int i, typename Env>
|
39 |
-
struct argument_helper
|
40 |
-
{
|
41 |
-
typedef typename thrust::tuple_element<i,Env>::type type;
|
42 |
-
};
|
43 |
-
|
44 |
-
template<unsigned int i>
|
45 |
-
struct argument_helper<i,thrust::null_type>
|
46 |
-
{
|
47 |
-
typedef thrust::null_type type;
|
48 |
-
};
|
49 |
-
|
50 |
-
|
51 |
-
template<unsigned int i>
|
52 |
-
class argument
|
53 |
-
{
|
54 |
-
public:
|
55 |
-
template<typename Env>
|
56 |
-
struct result
|
57 |
-
: argument_helper<i,Env>
|
58 |
-
{
|
59 |
-
};
|
60 |
-
|
61 |
-
__host__ __device__
|
62 |
-
THRUST_CONSTEXPR argument(){}
|
63 |
-
|
64 |
-
template<typename Env>
|
65 |
-
__host__ __device__
|
66 |
-
typename result<Env>::type eval(const Env &e) const
|
67 |
-
{
|
68 |
-
return thrust::get<i>(e);
|
69 |
-
} // end eval()
|
70 |
-
}; // end argument
|
71 |
-
|
72 |
-
} // end functional
|
73 |
-
} // end detail
|
74 |
-
} // end thrust
|
75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/memory.h
DELETED
@@ -1,547 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
/*! \file thrust/memory.h
|
18 |
-
* \brief Abstractions for Thrust's memory model.
|
19 |
-
*/
|
20 |
-
|
21 |
-
#include <thrust/detail/config.h>
|
22 |
-
|
23 |
-
#include <thrust/detail/type_traits/pointer_traits.h>
|
24 |
-
#include <thrust/detail/pointer.h>
|
25 |
-
#include <thrust/detail/reference.h>
|
26 |
-
#include <thrust/detail/raw_pointer_cast.h>
|
27 |
-
#include <thrust/detail/raw_reference_cast.h>
|
28 |
-
#include <thrust/detail/malloc_and_free.h>
|
29 |
-
#include <thrust/detail/temporary_buffer.h>
|
30 |
-
|
31 |
-
namespace thrust
|
32 |
-
{
|
33 |
-
|
34 |
-
/*! \defgroup memory_management Memory Management
|
35 |
-
*
|
36 |
-
* All Thrust functionalities related to memory allocation and deallocation.
|
37 |
-
*
|
38 |
-
*/
|
39 |
-
|
40 |
-
/** \addtogroup memory_management_classes Memory Management Classes
|
41 |
-
* \ingroup memory_management
|
42 |
-
* \{
|
43 |
-
*/
|
44 |
-
|
45 |
-
// define pointer for the purpose of Doxygenating it
|
46 |
-
// it is actually defined elsewhere
|
47 |
-
#if 0
|
48 |
-
/*! \p pointer stores a pointer to an object allocated in memory. Like \p device_ptr, this
|
49 |
-
* type ensures type safety when dispatching standard algorithms on ranges resident in memory.
|
50 |
-
*
|
51 |
-
* \p pointer generalizes \p device_ptr by relaxing the backend system associated with the \p pointer.
|
52 |
-
* Instead of the backend system specified by \p THRUST_DEFAULT_DEVICE_BACKEND, \p pointer's
|
53 |
-
* system is given by its second template parameter, \p Tag. For the purpose of Thrust dispatch,
|
54 |
-
* <tt>device_ptr<Element></tt> and <tt>pointer<Element,device_system_tag></tt> are considered equivalent.
|
55 |
-
*
|
56 |
-
* The raw pointer encapsulated by a \p pointer may be obtained through its <tt>get</tt> member function
|
57 |
-
* or the \p raw_pointer_cast free function.
|
58 |
-
*
|
59 |
-
* \tparam Element specifies the type of the pointed-to object.
|
60 |
-
*
|
61 |
-
* \tparam Tag specifies the system with which this \p pointer is associated. This may be any Thrust
|
62 |
-
* backend system, or a user-defined tag.
|
63 |
-
*
|
64 |
-
* \tparam Reference allows the client to specify the reference type returned upon derereference.
|
65 |
-
* By default, this type is <tt>reference<Element,pointer></tt>.
|
66 |
-
*
|
67 |
-
* \tparam Derived allows the client to specify the name of the derived type when \p pointer is used as
|
68 |
-
* a base class. This is useful to ensure that arithmetic on values of the derived type return
|
69 |
-
* values of the derived type as a result. By default, this type is <tt>pointer<Element,Tag,Reference></tt>.
|
70 |
-
*
|
71 |
-
* \note \p pointer is not a smart pointer; it is the client's responsibility to deallocate memory
|
72 |
-
* pointer to by \p pointer.
|
73 |
-
*
|
74 |
-
* \see device_ptr
|
75 |
-
* \see reference
|
76 |
-
* \see raw_pointer_cast
|
77 |
-
*/
|
78 |
-
template<typename Element, typename Tag, typename Reference = thrust::use_default, typename Derived = thrust::use_default>
|
79 |
-
class pointer
|
80 |
-
{
|
81 |
-
public:
|
82 |
-
/*! The type of the raw pointer
|
83 |
-
*/
|
84 |
-
typedef typename super_t::base_type raw_pointer;
|
85 |
-
|
86 |
-
/*! \p pointer's default constructor initializes its encapsulated pointer to \c 0
|
87 |
-
*/
|
88 |
-
__host__ __device__
|
89 |
-
pointer();
|
90 |
-
|
91 |
-
/*! This constructor allows construction of a <tt>pointer<const T, ...></tt> from a <tt>T*</tt>.
|
92 |
-
*
|
93 |
-
* \param ptr A raw pointer to copy from, presumed to point to a location in \p Tag's memory.
|
94 |
-
* \tparam OtherElement \p OtherElement shall be convertible to \p Element.
|
95 |
-
*/
|
96 |
-
template<typename OtherElement>
|
97 |
-
__host__ __device__
|
98 |
-
explicit pointer(OtherElement *ptr);
|
99 |
-
|
100 |
-
/*! This contructor allows initialization from another pointer-like object.
|
101 |
-
*
|
102 |
-
* \param other The \p OtherPointer to copy.
|
103 |
-
*
|
104 |
-
* \tparam OtherPointer The tag associated with \p OtherPointer shall be convertible to \p Tag,
|
105 |
-
* and its element type shall be convertible to \p Element.
|
106 |
-
*/
|
107 |
-
template<typename OtherPointer>
|
108 |
-
__host__ __device__
|
109 |
-
pointer(const OtherPointer &other,
|
110 |
-
typename thrust::detail::enable_if_pointer_is_convertible<
|
111 |
-
OtherPointer,
|
112 |
-
pointer<Element,Tag,Reference,Derived>
|
113 |
-
>::type * = 0);
|
114 |
-
|
115 |
-
/*! Assignment operator allows assigning from another pointer-like object with related type.
|
116 |
-
*
|
117 |
-
* \param other The other pointer-like object to assign from.
|
118 |
-
* \return <tt>*this</tt>
|
119 |
-
*
|
120 |
-
* \tparam OtherPointer The tag associated with \p OtherPointer shall be convertible to \p Tag,
|
121 |
-
* and its element type shall be convertible to \p Element.
|
122 |
-
*/
|
123 |
-
template<typename OtherPointer>
|
124 |
-
__host__ __device__
|
125 |
-
typename thrust::detail::enable_if_pointer_is_convertible<
|
126 |
-
OtherPointer,
|
127 |
-
pointer,
|
128 |
-
derived_type &
|
129 |
-
>::type
|
130 |
-
operator=(const OtherPointer &other);
|
131 |
-
|
132 |
-
/*! \p get returns this \p pointer's encapsulated raw pointer.
|
133 |
-
* \return This \p pointer's raw pointer.
|
134 |
-
*/
|
135 |
-
__host__ __device__
|
136 |
-
Element *get() const;
|
137 |
-
};
|
138 |
-
#endif
|
139 |
-
|
140 |
-
// define pointer for the purpose of Doxygenating it
|
141 |
-
// it is actually defined elsewhere
|
142 |
-
#if 0
|
143 |
-
/*! \p reference is a wrapped reference to an object stored in memory. \p reference generalizes
|
144 |
-
* \p device_reference by relaxing the type of pointer associated with the object. \p reference
|
145 |
-
* is the type of the result of dereferencing a tagged pointer-like object such as \p pointer, and
|
146 |
-
* intermediates operations on objects existing in a remote memory.
|
147 |
-
*
|
148 |
-
* \tparam Element specifies the type of the referent object.
|
149 |
-
* \tparam Pointer specifies the type of the result of taking the address of \p reference.
|
150 |
-
* \tparam Derived allows the client to specify the name of the derived type when \p reference is used as
|
151 |
-
* a base class. This is useful to ensure that assignment to objects of the derived type return
|
152 |
-
* values of the derived type as a result. By default, this type is <tt>reference<Element,Pointer></tt>.
|
153 |
-
*/
|
154 |
-
template<typename Element, typename Pointer, typename Derived = thrust::use_default>
|
155 |
-
class reference
|
156 |
-
{
|
157 |
-
public:
|
158 |
-
/*! The type of this \p reference's wrapped pointers.
|
159 |
-
*/
|
160 |
-
typedef Pointer pointer;
|
161 |
-
|
162 |
-
/*! The \p value_type of this \p reference.
|
163 |
-
*/
|
164 |
-
typedef typename thrust::detail::remove_const<Element>::type value_type;
|
165 |
-
|
166 |
-
/*! This copy constructor initializes this \p reference
|
167 |
-
* to refer to an object pointed to by the given \p pointer. After
|
168 |
-
* this \p reference is constructed, it shall refer to the
|
169 |
-
* object pointed to by \p ptr.
|
170 |
-
*
|
171 |
-
* \param ptr A \p pointer to copy from.
|
172 |
-
*/
|
173 |
-
__host__ __device__
|
174 |
-
explicit reference(const pointer &ptr);
|
175 |
-
|
176 |
-
/*! This copy constructor accepts a const reference to another
|
177 |
-
* \p reference of related type. After this \p reference is constructed,
|
178 |
-
* it shall refer to the same object as \p other.
|
179 |
-
*
|
180 |
-
* \param other A \p reference to copy from.
|
181 |
-
* \tparam OtherElement the element type of the other \p reference.
|
182 |
-
* \tparam OtherPointer the pointer type of the other \p reference.
|
183 |
-
* \tparam OtherDerived the derived type of the other \p reference.
|
184 |
-
*
|
185 |
-
* \note This constructor is templated primarily to allow initialization of
|
186 |
-
* <tt>reference<const T,...></tt> from <tt>reference<T,...></tt>.
|
187 |
-
*/
|
188 |
-
template<typename OtherElement, typename OtherPointer, typename OtherDerived>
|
189 |
-
__host__ __device__
|
190 |
-
reference(const reference<OtherElement,OtherPointer,OtherDerived> &other,
|
191 |
-
typename thrust::detail::enable_if_convertible<
|
192 |
-
typename reference<OtherElement,OtherPointer,OtherDerived>::pointer,
|
193 |
-
pointer
|
194 |
-
>::type * = 0);
|
195 |
-
|
196 |
-
/*! Copy assignment operator copy assigns from another \p reference.
|
197 |
-
*
|
198 |
-
* \param other The other \p reference to assign from.
|
199 |
-
* \return <tt>static_cast<derived_type&>(*this)</tt>
|
200 |
-
*/
|
201 |
-
__host__ __device__
|
202 |
-
derived_type &operator=(const reference &other);
|
203 |
-
|
204 |
-
/*! Assignment operator copy assigns from another \p reference of related type.
|
205 |
-
*
|
206 |
-
* \param other The other \p reference to assign from.
|
207 |
-
* \return <tt>static_cast<derived_type&>(*this)</tt>
|
208 |
-
*
|
209 |
-
* \tparam OtherElement the element type of the other \p reference.
|
210 |
-
* \tparam OtherPointer the pointer type of the other \p reference.
|
211 |
-
* \tparam OtherDerived the derived type of the other \p reference.
|
212 |
-
*/
|
213 |
-
template<typename OtherElement, typename OtherPointer, typename OtherDerived>
|
214 |
-
__host__ __device__
|
215 |
-
derived_type &operator=(const reference<OtherElement,OtherPointer,OtherDerived> &other);
|
216 |
-
|
217 |
-
/*! Assignment operator assigns from a \p value_type.
|
218 |
-
*
|
219 |
-
* \param x The \p value_type to assign from.
|
220 |
-
* \return <tt>static_cast<derived_type&>(*this)</tt>.
|
221 |
-
*/
|
222 |
-
__host__ __device__
|
223 |
-
derived_type &operator=(const value_type &x);
|
224 |
-
|
225 |
-
/*! Address-of operator returns a \p pointer pointing to the object
|
226 |
-
* referenced by this \p reference. It does not return the address of this
|
227 |
-
* \p reference.
|
228 |
-
*
|
229 |
-
* \return A \p pointer pointing to the referenct object.
|
230 |
-
*/
|
231 |
-
__host__ __device__
|
232 |
-
pointer operator&() const;
|
233 |
-
|
234 |
-
/*! Conversion operator converts this \p reference to \p value_type by
|
235 |
-
* returning a copy of the referent object.
|
236 |
-
*
|
237 |
-
* \return A copy of the referent object.
|
238 |
-
*/
|
239 |
-
__host__ __device__
|
240 |
-
operator value_type () const;
|
241 |
-
|
242 |
-
/*! Swaps the value of the referent object with another.
|
243 |
-
*
|
244 |
-
* \param other The other \p reference with which to swap.
|
245 |
-
* \note The argument is of type \p derived_type rather than \p reference.
|
246 |
-
*/
|
247 |
-
__host__ __device__
|
248 |
-
void swap(derived_type &other);
|
249 |
-
|
250 |
-
/*! Prefix increment operator increments the referent object.
|
251 |
-
*
|
252 |
-
* \return <tt>static_Cast<derived_type&>(*this)</tt>.
|
253 |
-
*
|
254 |
-
* \note Documentation for other arithmetic operators omitted for brevity.
|
255 |
-
*/
|
256 |
-
derived_type &operator++();
|
257 |
-
};
|
258 |
-
#endif
|
259 |
-
|
260 |
-
/*! \}
|
261 |
-
*/
|
262 |
-
|
263 |
-
/*!
|
264 |
-
* \addtogroup memory_management_functions Memory Management Functions
|
265 |
-
* \ingroup memory_management
|
266 |
-
* \{
|
267 |
-
*/
|
268 |
-
|
269 |
-
|
270 |
-
/*! \addtogroup allocation_functions
|
271 |
-
* \{
|
272 |
-
*/
|
273 |
-
|
274 |
-
|
275 |
-
/*! This version of \p malloc allocates untyped uninitialized storage associated with a given system.
|
276 |
-
*
|
277 |
-
* \param system The Thrust system with which to associate the storage.
|
278 |
-
* \param n The number of bytes of storage to allocate.
|
279 |
-
* \return If allocation succeeds, a pointer to the allocated storage; a null pointer otherwise.
|
280 |
-
* The pointer must be deallocated with \p thrust::free.
|
281 |
-
*
|
282 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
283 |
-
*
|
284 |
-
* \pre \p DerivedPolicy must be publically derived from <code>thrust::execution_policy<DerivedPolicy></code>.
|
285 |
-
*
|
286 |
-
* The following code snippet demonstrates how to use \p malloc to allocate a range of memory
|
287 |
-
* associated with Thrust's device system.
|
288 |
-
*
|
289 |
-
* \code
|
290 |
-
* #include <thrust/memory.h>
|
291 |
-
* ...
|
292 |
-
* // allocate some memory with thrust::malloc
|
293 |
-
* const int N = 100;
|
294 |
-
* thrust::device_system_tag device_sys;
|
295 |
-
* thrust::pointer<void,thrust::device_space_tag> void_ptr = thrust::malloc(device_sys, N);
|
296 |
-
*
|
297 |
-
* // manipulate memory
|
298 |
-
* ...
|
299 |
-
*
|
300 |
-
* // deallocate void_ptr with thrust::free
|
301 |
-
* thrust::free(device_sys, void_ptr);
|
302 |
-
* \endcode
|
303 |
-
*
|
304 |
-
* \see free
|
305 |
-
* \see device_malloc
|
306 |
-
*/
|
307 |
-
template<typename DerivedPolicy>
|
308 |
-
__host__ __device__
|
309 |
-
pointer<void,DerivedPolicy> malloc(const thrust::detail::execution_policy_base<DerivedPolicy> &system, std::size_t n);
|
310 |
-
|
311 |
-
|
312 |
-
/*! This version of \p malloc allocates typed uninitialized storage associated with a given system.
|
313 |
-
*
|
314 |
-
* \param system The Thrust system with which to associate the storage.
|
315 |
-
* \param n The number of elements of type \c T which the storage should accomodate.
|
316 |
-
* \return If allocation succeeds, a pointer to an allocation large enough to accomodate \c n
|
317 |
-
* elements of type \c T; a null pointer otherwise.
|
318 |
-
* The pointer must be deallocated with \p thrust::free.
|
319 |
-
*
|
320 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
321 |
-
*
|
322 |
-
* \pre \p DerivedPolicy must be publically derived from <code>thrust::execution_policy<DerivedPolicy></code>.
|
323 |
-
*
|
324 |
-
* The following code snippet demonstrates how to use \p malloc to allocate a range of memory
|
325 |
-
* to accomodate integers associated with Thrust's device system.
|
326 |
-
*
|
327 |
-
* \code
|
328 |
-
* #include <thrust/memory.h>
|
329 |
-
* ...
|
330 |
-
* // allocate storage for 100 ints with thrust::malloc
|
331 |
-
* const int N = 100;
|
332 |
-
* thrust::device_system_tag device_sys;
|
333 |
-
* thrust::pointer<int,thrust::device_system_tag> ptr = thrust::malloc<int>(device_sys, N);
|
334 |
-
*
|
335 |
-
* // manipulate memory
|
336 |
-
* ...
|
337 |
-
*
|
338 |
-
* // deallocate ptr with thrust::free
|
339 |
-
* thrust::free(device_sys, ptr);
|
340 |
-
* \endcode
|
341 |
-
*
|
342 |
-
* \see free
|
343 |
-
* \see device_malloc
|
344 |
-
*/
|
345 |
-
template<typename T, typename DerivedPolicy>
|
346 |
-
__host__ __device__
|
347 |
-
pointer<T,DerivedPolicy> malloc(const thrust::detail::execution_policy_base<DerivedPolicy> &system, std::size_t n);
|
348 |
-
|
349 |
-
|
350 |
-
/*! \p get_temporary_buffer returns a pointer to storage associated with a given Thrust system sufficient to store up to
|
351 |
-
* \p n objects of type \c T. If not enough storage is available to accomodate \p n objects, an implementation may return
|
352 |
-
* a smaller buffer. The number of objects the returned buffer can accomodate is also returned.
|
353 |
-
*
|
354 |
-
* Thrust uses \p get_temporary_buffer internally when allocating temporary storage required by algorithm implementations.
|
355 |
-
*
|
356 |
-
* The storage allocated with \p get_temporary_buffer must be returned to the system with \p return_temporary_buffer.
|
357 |
-
*
|
358 |
-
* \param system The Thrust system with which to associate the storage.
|
359 |
-
* \param n The requested number of objects of type \c T the storage should accomodate.
|
360 |
-
* \return A pair \c p such that <tt>p.first</tt> is a pointer to the allocated storage and <tt>p.second</tt> is the number of
|
361 |
-
* contiguous objects of type \c T that the storage can accomodate. If no storage can be allocated, <tt>p.first</tt> if
|
362 |
-
* no storage can be obtained. The storage must be returned to the system using \p return_temporary_buffer.
|
363 |
-
*
|
364 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
365 |
-
*
|
366 |
-
* \pre \p DerivedPolicy must be publically derived from <code>thrust::execution_policy<DerivedPolicy></code>.
|
367 |
-
*
|
368 |
-
* The following code snippet demonstrates how to use \p get_temporary_buffer to allocate a range of memory
|
369 |
-
* to accomodate integers associated with Thrust's device system.
|
370 |
-
*
|
371 |
-
* \code
|
372 |
-
* #include <thrust/memory.h>
|
373 |
-
* ...
|
374 |
-
* // allocate storage for 100 ints with thrust::get_temporary_buffer
|
375 |
-
* const int N = 100;
|
376 |
-
*
|
377 |
-
* typedef thrust::pair<
|
378 |
-
* thrust::pointer<int,thrust::device_system_tag>,
|
379 |
-
* std::ptrdiff_t
|
380 |
-
* > ptr_and_size_t;
|
381 |
-
*
|
382 |
-
* thrust::device_system_tag device_sys;
|
383 |
-
* ptr_and_size_t ptr_and_size = thrust::get_temporary_buffer<int>(device_sys, N);
|
384 |
-
*
|
385 |
-
* // manipulate up to 100 ints
|
386 |
-
* for(int i = 0; i < ptr_and_size.second; ++i)
|
387 |
-
* {
|
388 |
-
* *ptr_and_size.first = i;
|
389 |
-
* }
|
390 |
-
*
|
391 |
-
* // deallocate storage with thrust::return_temporary_buffer
|
392 |
-
* thrust::return_temporary_buffer(device_sys, ptr_and_size.first);
|
393 |
-
* \endcode
|
394 |
-
*
|
395 |
-
* \see malloc
|
396 |
-
* \see return_temporary_buffer
|
397 |
-
*/
|
398 |
-
template<typename T, typename DerivedPolicy>
|
399 |
-
__host__ __device__
|
400 |
-
thrust::pair<thrust::pointer<T,DerivedPolicy>, typename thrust::pointer<T,DerivedPolicy>::difference_type>
|
401 |
-
get_temporary_buffer(const thrust::detail::execution_policy_base<DerivedPolicy> &system, typename thrust::pointer<T,DerivedPolicy>::difference_type n);
|
402 |
-
|
403 |
-
|
404 |
-
/*! \} allocation_functions
|
405 |
-
*/
|
406 |
-
|
407 |
-
|
408 |
-
/*! \addtogroup deallocation_functions
|
409 |
-
* \{
|
410 |
-
*/
|
411 |
-
|
412 |
-
|
413 |
-
/*! \p free deallocates the storage previously allocated by \p thrust::malloc.
|
414 |
-
*
|
415 |
-
* \param system The Thrust system with which the storage is associated.
|
416 |
-
* \param ptr A pointer previously returned by \p thrust::malloc. If \p ptr is null, \p free
|
417 |
-
* does nothing.
|
418 |
-
*
|
419 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
420 |
-
*
|
421 |
-
* \pre \p ptr shall have been returned by a previous call to <tt>thrust::malloc(system, n)</tt> or <tt>thrust::malloc<T>(system, n)</tt> for some type \c T.
|
422 |
-
*
|
423 |
-
* The following code snippet demonstrates how to use \p free to deallocate a range of memory
|
424 |
-
* previously allocated with \p thrust::malloc.
|
425 |
-
*
|
426 |
-
* \code
|
427 |
-
* #include <thrust/memory.h>
|
428 |
-
* ...
|
429 |
-
* // allocate storage for 100 ints with thrust::malloc
|
430 |
-
* const int N = 100;
|
431 |
-
* thrust::device_system_tag device_sys;
|
432 |
-
* thrust::pointer<int,thrust::device_system_tag> ptr = thrust::malloc<int>(device_sys, N);
|
433 |
-
*
|
434 |
-
* // mainpulate memory
|
435 |
-
* ...
|
436 |
-
*
|
437 |
-
* // deallocate ptr with thrust::free
|
438 |
-
* thrust::free(device_sys, ptr);
|
439 |
-
* \endcode
|
440 |
-
*/
|
441 |
-
template<typename DerivedPolicy, typename Pointer>
|
442 |
-
__host__ __device__
|
443 |
-
void free(const thrust::detail::execution_policy_base<DerivedPolicy> &system, Pointer ptr);
|
444 |
-
|
445 |
-
|
446 |
-
/*! \p return_temporary_buffer deallocates storage associated with a given Thrust system previously allocated by \p get_temporary_buffer.
|
447 |
-
*
|
448 |
-
* Thrust uses \p return_temporary_buffer internally when deallocating temporary storage required by algorithm implementations.
|
449 |
-
*
|
450 |
-
* \param system The Thrust system with which the storage is associated.
|
451 |
-
* \param p A pointer previously returned by \p thrust::get_temporary_buffer. If \p ptr is null, \p return_temporary_buffer does nothing.
|
452 |
-
*
|
453 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
454 |
-
*
|
455 |
-
* \pre \p p shall have been previously allocated by \p thrust::get_temporary_buffer.
|
456 |
-
*
|
457 |
-
* The following code snippet demonstrates how to use \p return_temporary_buffer to deallocate a range of memory
|
458 |
-
* previously allocated by \p get_temporary_buffer.
|
459 |
-
*
|
460 |
-
* \code
|
461 |
-
* #include <thrust/memory.h>
|
462 |
-
* ...
|
463 |
-
* // allocate storage for 100 ints with thrust::get_temporary_buffer
|
464 |
-
* const int N = 100;
|
465 |
-
*
|
466 |
-
* typedef thrust::pair<
|
467 |
-
* thrust::pointer<int,thrust::device_system_tag>,
|
468 |
-
* std::ptrdiff_t
|
469 |
-
* > ptr_and_size_t;
|
470 |
-
*
|
471 |
-
* thrust::device_system_tag device_sys;
|
472 |
-
* ptr_and_size_t ptr_and_size = thrust::get_temporary_buffer<int>(device_sys, N);
|
473 |
-
*
|
474 |
-
* // manipulate up to 100 ints
|
475 |
-
* for(int i = 0; i < ptr_and_size.second; ++i)
|
476 |
-
* {
|
477 |
-
* *ptr_and_size.first = i;
|
478 |
-
* }
|
479 |
-
*
|
480 |
-
* // deallocate storage with thrust::return_temporary_buffer
|
481 |
-
* thrust::return_temporary_buffer(device_sys, ptr_and_size.first);
|
482 |
-
* \endcode
|
483 |
-
*
|
484 |
-
* \see free
|
485 |
-
* \see get_temporary_buffer
|
486 |
-
*/
|
487 |
-
template<typename DerivedPolicy, typename Pointer>
|
488 |
-
__host__ __device__
|
489 |
-
void return_temporary_buffer(const thrust::detail::execution_policy_base<DerivedPolicy> &system, Pointer p, std::ptrdiff_t n);
|
490 |
-
|
491 |
-
|
492 |
-
/*! \} deallocation_functions
|
493 |
-
*/
|
494 |
-
|
495 |
-
|
496 |
-
/*! \p raw_pointer_cast creates a "raw" pointer from a pointer-like type,
|
497 |
-
* simply returning the wrapped pointer, should it exist.
|
498 |
-
*
|
499 |
-
* \param ptr The pointer of interest.
|
500 |
-
* \return <tt>ptr.get()</tt>, if the expression is well formed; <tt>ptr</tt>, otherwise.
|
501 |
-
* \see raw_reference_cast
|
502 |
-
*/
|
503 |
-
template<typename Pointer>
|
504 |
-
__host__ __device__
|
505 |
-
typename thrust::detail::pointer_traits<Pointer>::raw_pointer
|
506 |
-
raw_pointer_cast(Pointer ptr);
|
507 |
-
|
508 |
-
|
509 |
-
/*! \p raw_reference_cast creates a "raw" reference from a wrapped reference type,
|
510 |
-
* simply returning the underlying reference, should it exist.
|
511 |
-
*
|
512 |
-
* If the argument is not a reference wrapper, the result is a reference to the argument.
|
513 |
-
*
|
514 |
-
* \param ref The reference of interest.
|
515 |
-
* \return <tt>*thrust::raw_pointer_cast(&ref)</tt>.
|
516 |
-
* \note There are two versions of \p raw_reference_cast. One for <tt>const</tt> references,
|
517 |
-
* and one for non-<tt>const</tt>.
|
518 |
-
* \see raw_pointer_cast
|
519 |
-
*/
|
520 |
-
template<typename T>
|
521 |
-
__host__ __device__
|
522 |
-
typename detail::raw_reference<T>::type
|
523 |
-
raw_reference_cast(T &ref);
|
524 |
-
|
525 |
-
|
526 |
-
/*! \p raw_reference_cast creates a "raw" reference from a wrapped reference type,
|
527 |
-
* simply returning the underlying reference, should it exist.
|
528 |
-
*
|
529 |
-
* If the argument is not a reference wrapper, the result is a reference to the argument.
|
530 |
-
*
|
531 |
-
* \param ref The reference of interest.
|
532 |
-
* \return <tt>*thrust::raw_pointer_cast(&ref)</tt>.
|
533 |
-
* \note There are two versions of \p raw_reference_cast. One for <tt>const</tt> references,
|
534 |
-
* and one for non-<tt>const</tt>.
|
535 |
-
* \see raw_pointer_cast
|
536 |
-
*/
|
537 |
-
template<typename T>
|
538 |
-
__host__ __device__
|
539 |
-
typename detail::raw_reference<const T>::type
|
540 |
-
raw_reference_cast(const T &ref);
|
541 |
-
|
542 |
-
|
543 |
-
/*! \}
|
544 |
-
*/
|
545 |
-
|
546 |
-
} // end thrust
|
547 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/generate.h
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// this system inherits generate
|
22 |
-
#include <thrust/system/cpp/detail/generate.h>
|
23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/vector.h
DELETED
@@ -1,817 +0,0 @@
|
|
1 |
-
#pragma once
|
2 |
-
|
3 |
-
#include "diffvg.h"
|
4 |
-
#include <cmath>
|
5 |
-
#include <iostream>
|
6 |
-
|
7 |
-
template <typename T>
|
8 |
-
struct TVector2 {
|
9 |
-
DEVICE TVector2() {}
|
10 |
-
|
11 |
-
template <typename T2>
|
12 |
-
DEVICE
|
13 |
-
TVector2(T2 x, T2 y) : x(T(x)), y(T(y)) {}
|
14 |
-
|
15 |
-
template <typename T2>
|
16 |
-
DEVICE
|
17 |
-
TVector2(const TVector2<T2> &v) : x(T(v.x)), y(T(v.y)) {}
|
18 |
-
|
19 |
-
DEVICE T& operator[](int i) {
|
20 |
-
return *(&x + i);
|
21 |
-
}
|
22 |
-
|
23 |
-
DEVICE T operator[](int i) const {
|
24 |
-
return *(&x + i);
|
25 |
-
}
|
26 |
-
|
27 |
-
T x, y;
|
28 |
-
};
|
29 |
-
|
30 |
-
template <typename T>
|
31 |
-
struct TVector3 {
|
32 |
-
DEVICE TVector3() {}
|
33 |
-
|
34 |
-
template <typename T2>
|
35 |
-
DEVICE
|
36 |
-
TVector3(T2 x, T2 y, T2 z) : x(T(x)), y(T(y)), z(T(z)) {}
|
37 |
-
|
38 |
-
template <typename T2>
|
39 |
-
DEVICE
|
40 |
-
TVector3(const TVector3<T2> &v) : x(T(v.x)), y(T(v.y)), z(T(v.z)) {}
|
41 |
-
|
42 |
-
DEVICE T& operator[](int i) {
|
43 |
-
return *(&x + i);
|
44 |
-
}
|
45 |
-
|
46 |
-
DEVICE T operator[](int i) const {
|
47 |
-
return *(&x + i);
|
48 |
-
}
|
49 |
-
|
50 |
-
T x, y, z;
|
51 |
-
};
|
52 |
-
|
53 |
-
template <typename T>
|
54 |
-
struct TVector4 {
|
55 |
-
DEVICE TVector4() {}
|
56 |
-
|
57 |
-
template <typename T2>
|
58 |
-
DEVICE
|
59 |
-
TVector4(T2 x, T2 y, T2 z, T2 w) : x(T(x)), y(T(y)), z(T(z)), w(T(w)) {}
|
60 |
-
|
61 |
-
template <typename T2>
|
62 |
-
DEVICE
|
63 |
-
TVector4(const TVector4<T2> &v) : x(T(v.x)), y(T(v.y)), z(T(v.z)), w(T(v.w)) {}
|
64 |
-
|
65 |
-
|
66 |
-
DEVICE T& operator[](int i) {
|
67 |
-
return *(&x + i);
|
68 |
-
}
|
69 |
-
|
70 |
-
DEVICE T operator[](int i) const {
|
71 |
-
return *(&x + i);
|
72 |
-
}
|
73 |
-
|
74 |
-
T x, y, z, w;
|
75 |
-
};
|
76 |
-
|
77 |
-
using Vector2f = TVector2<float>;
|
78 |
-
using Vector2d = TVector2<double>;
|
79 |
-
using Vector2i = TVector2<int>;
|
80 |
-
using Vector2 = TVector2<Real>;
|
81 |
-
using Vector3i = TVector3<int>;
|
82 |
-
using Vector3f = TVector3<float>;
|
83 |
-
using Vector3d = TVector3<double>;
|
84 |
-
using Vector3 = TVector3<Real>;
|
85 |
-
using Vector4f = TVector4<float>;
|
86 |
-
using Vector4d = TVector4<double>;
|
87 |
-
using Vector4 = TVector4<Real>;
|
88 |
-
|
89 |
-
template <typename T0, typename T1>
|
90 |
-
DEVICE
|
91 |
-
inline auto operator+(const TVector2<T0> &v0,
|
92 |
-
const TVector2<T1> &v1) -> TVector2<decltype(v0[0] + v1[0])> {
|
93 |
-
return TVector2<decltype(v0[0] + v1[0])>{
|
94 |
-
v0[0] + v1[0], v0[1] + v1[1]};
|
95 |
-
}
|
96 |
-
|
97 |
-
template <typename T0, typename T1>
|
98 |
-
DEVICE
|
99 |
-
inline auto operator+(const T0 &v0,
|
100 |
-
const TVector2<T1> &v1) -> TVector2<decltype(v0 + v1[0])> {
|
101 |
-
return TVector2<decltype(v0 + v1[0])>{v0 + v1[0], v0 + v1[1]};
|
102 |
-
}
|
103 |
-
|
104 |
-
template <typename T0, typename T1>
|
105 |
-
DEVICE
|
106 |
-
inline auto operator+(const T0 &v0,
|
107 |
-
const TVector3<T1> &v1) -> TVector3<decltype(v0[0] + v1)> {
|
108 |
-
return TVector3<decltype(v0 + v1[0])>{
|
109 |
-
v0 + v1[0], v0 + v1[1], v0 + v1[2]};
|
110 |
-
}
|
111 |
-
|
112 |
-
template <typename T0, typename T1>
|
113 |
-
DEVICE
|
114 |
-
inline auto operator+(const TVector2<T0> &v0,
|
115 |
-
const T1 &v1) -> TVector2<decltype(v0[0] + v1)> {
|
116 |
-
return TVector2<decltype(v0[0] + v1)>{
|
117 |
-
v0[0] + v1, v0[1] + v1};
|
118 |
-
}
|
119 |
-
|
120 |
-
template <typename T0, typename T1>
|
121 |
-
DEVICE
|
122 |
-
inline auto operator+(const TVector3<T0> &v0,
|
123 |
-
const T1 &v1) -> TVector3<decltype(v0[0] + v1[0])> {
|
124 |
-
return TVector3<decltype(v0[0] + v1)>{
|
125 |
-
v0[0] + v1, v0[1] + v1, v0[2] + v1};
|
126 |
-
}
|
127 |
-
|
128 |
-
template <typename T0, typename T1>
|
129 |
-
DEVICE
|
130 |
-
inline auto operator+(const TVector3<T0> &v0,
|
131 |
-
const TVector3<T1> &v1) -> TVector3<decltype(v0[0] + v1[0])> {
|
132 |
-
return TVector3<decltype(v0[0] + v1[0])>{
|
133 |
-
v0[0] + v1[0], v0[1] + v1[1], v0[2] + v1[2]};
|
134 |
-
}
|
135 |
-
|
136 |
-
template <typename T0, typename T1>
|
137 |
-
DEVICE
|
138 |
-
inline auto operator+(const TVector4<T0> &v0,
|
139 |
-
const TVector4<T1> &v1) -> TVector4<decltype(v0[0] + v1[0])> {
|
140 |
-
return TVector4<decltype(v0[0] + v1[0])>{
|
141 |
-
v0[0] + v1[0], v0[1] + v1[1], v0[2] + v1[2], v0[3] + v1[3]};
|
142 |
-
}
|
143 |
-
|
144 |
-
template <typename T0, typename T1>
|
145 |
-
DEVICE
|
146 |
-
inline auto operator+=(TVector2<T0> &v0,
|
147 |
-
const TVector2<T1> &v1) -> TVector2<T0>& {
|
148 |
-
v0[0] += v1[0];
|
149 |
-
v0[1] += v1[1];
|
150 |
-
return v0;
|
151 |
-
}
|
152 |
-
|
153 |
-
template <typename T0, typename T1>
|
154 |
-
DEVICE
|
155 |
-
inline auto operator+=(TVector3<T0> &v0,
|
156 |
-
const TVector3<T1> &v1) -> TVector3<T0>& {
|
157 |
-
v0[0] += v1[0];
|
158 |
-
v0[1] += v1[1];
|
159 |
-
v0[2] += v1[2];
|
160 |
-
return v0;
|
161 |
-
}
|
162 |
-
|
163 |
-
template <typename T0, typename T1>
|
164 |
-
DEVICE
|
165 |
-
inline auto operator+=(TVector3<T0> &v0,
|
166 |
-
const T1 &v1) -> TVector3<T0>& {
|
167 |
-
v0[0] += v1;
|
168 |
-
v0[1] += v1;
|
169 |
-
v0[2] += v1;
|
170 |
-
return v0;
|
171 |
-
}
|
172 |
-
|
173 |
-
template <typename T0, typename T1>
|
174 |
-
DEVICE
|
175 |
-
inline auto operator+=(TVector4<T0> &v0,
|
176 |
-
const TVector4<T1> &v1) -> TVector4<T0>& {
|
177 |
-
v0[0] += v1[0];
|
178 |
-
v0[1] += v1[1];
|
179 |
-
v0[2] += v1[2];
|
180 |
-
v0[3] += v1[3];
|
181 |
-
return v0;
|
182 |
-
}
|
183 |
-
|
184 |
-
template <typename T0, typename T1>
|
185 |
-
DEVICE
|
186 |
-
inline auto operator+=(TVector4<T0> &v0,
|
187 |
-
const T1 &v1) -> TVector4<T0>& {
|
188 |
-
v0[0] += v1;
|
189 |
-
v0[1] += v1;
|
190 |
-
v0[2] += v1;
|
191 |
-
v0[3] += v1;
|
192 |
-
return v0;
|
193 |
-
}
|
194 |
-
|
195 |
-
template <typename T0, typename T1>
|
196 |
-
DEVICE
|
197 |
-
inline auto operator-(const T0 &v0,
|
198 |
-
const TVector2<T1> &v1) -> TVector2<decltype(v0 - v1[0])> {
|
199 |
-
return TVector2<decltype(v0 - v1[0])>{v0 - v1[0], v0 - v1[1]};
|
200 |
-
}
|
201 |
-
|
202 |
-
template <typename T0, typename T1>
|
203 |
-
DEVICE
|
204 |
-
inline auto operator-(const T0 &v0,
|
205 |
-
const TVector3<T1> &v1) -> TVector2<decltype(v0 - v1[0])> {
|
206 |
-
return TVector3<decltype(v0 - v1[0])>{v0 - v1[0], v0 - v1[1], v0 - v1[2]};
|
207 |
-
}
|
208 |
-
|
209 |
-
template <typename T0, typename T1>
|
210 |
-
DEVICE
|
211 |
-
inline auto operator-(const TVector2<T0> &v0,
|
212 |
-
const T1 &v1) -> TVector2<decltype(v0[0] - v1)> {
|
213 |
-
return TVector2<decltype(v0[0] - v1)>{v0[0] - v1, v0[1] - v1};
|
214 |
-
}
|
215 |
-
|
216 |
-
template <typename T0, typename T1>
|
217 |
-
DEVICE
|
218 |
-
inline auto operator-(const TVector3<T0> &v0,
|
219 |
-
const T1 &v1) -> TVector3<decltype(v0[0] - v1)> {
|
220 |
-
return TVector3<decltype(v0[0] - v1)>{v0[0] - v1, v0[1] - v1, v0[2] - v1};
|
221 |
-
}
|
222 |
-
|
223 |
-
template <typename T0, typename T1>
|
224 |
-
DEVICE
|
225 |
-
inline auto operator-(const TVector2<T0> &v0,
|
226 |
-
const TVector2<T1> &v1) -> TVector2<decltype(v0[0] - v1[0])> {
|
227 |
-
return TVector2<decltype(v0[0] - v1[0])>{
|
228 |
-
v0[0] - v1[0], v0[1] - v1[1]};
|
229 |
-
}
|
230 |
-
|
231 |
-
template <typename T>
|
232 |
-
DEVICE
|
233 |
-
inline auto operator-(const TVector2<T> &v) -> TVector2<T> {
|
234 |
-
return TVector2<T>{-v[0], -v[1]};
|
235 |
-
}
|
236 |
-
|
237 |
-
template <typename T>
|
238 |
-
DEVICE
|
239 |
-
inline auto operator-(const TVector3<T> &v) -> TVector3<T> {
|
240 |
-
return TVector3<T>{-v[0], -v[1], -v[2]};
|
241 |
-
}
|
242 |
-
|
243 |
-
template <typename T0, typename T1>
|
244 |
-
DEVICE
|
245 |
-
inline auto operator-(const TVector3<T0> &v0,
|
246 |
-
const TVector3<T1> &v1) -> TVector3<decltype(v0[0] - v1[0])> {
|
247 |
-
return TVector3<decltype(v0[0] - v1[0])>{
|
248 |
-
v0[0] - v1[0], v0[1] - v1[1], v0[2] - v1[2]};
|
249 |
-
}
|
250 |
-
|
251 |
-
template <typename T0, typename T1>
|
252 |
-
DEVICE
|
253 |
-
inline auto operator-(const TVector4<T0> &v0,
|
254 |
-
const TVector4<T1> &v1) -> TVector4<decltype(v0[0] - v1[0])> {
|
255 |
-
return TVector4<decltype(v0[0] - v1[0])>{
|
256 |
-
v0[0] - v1[0], v0[1] - v1[1], v0[2] - v1[2], v0[3] - v1[3]};
|
257 |
-
}
|
258 |
-
|
259 |
-
template <typename T0, typename T1>
|
260 |
-
DEVICE
|
261 |
-
inline auto operator-=(TVector2<T0> &v0,
|
262 |
-
const TVector2<T1> &v1) -> TVector2<T0>& {
|
263 |
-
v0[0] -= v1[0];
|
264 |
-
v0[1] -= v1[1];
|
265 |
-
return v0;
|
266 |
-
}
|
267 |
-
|
268 |
-
template <typename T0, typename T1>
|
269 |
-
DEVICE
|
270 |
-
inline auto operator-=(TVector3<T0> &v0,
|
271 |
-
const TVector3<T1> &v1) -> TVector3<T0>& {
|
272 |
-
v0[0] -= v1[0];
|
273 |
-
v0[1] -= v1[1];
|
274 |
-
v0[2] -= v1[2];
|
275 |
-
return v0;
|
276 |
-
}
|
277 |
-
|
278 |
-
template <typename T0, typename T1>
|
279 |
-
DEVICE
|
280 |
-
inline auto operator*(const TVector2<T0> &v0,
|
281 |
-
const TVector2<T1> &v1) -> TVector2<decltype(v0[0] * v1[0])> {
|
282 |
-
return TVector2<decltype(v0[0] * v1[0])>{
|
283 |
-
v0[0] * v1[0], v0[1] * v1[1]};
|
284 |
-
}
|
285 |
-
|
286 |
-
template <typename T0, typename T1>
|
287 |
-
DEVICE
|
288 |
-
inline auto operator*(const TVector2<T0> &v0,
|
289 |
-
const T1 &s) -> TVector2<decltype(v0[0] * s)> {
|
290 |
-
return TVector2<decltype(v0[0] * s)>{
|
291 |
-
v0[0] * s, v0[1] * s};
|
292 |
-
}
|
293 |
-
|
294 |
-
template <typename T0, typename T1>
|
295 |
-
DEVICE
|
296 |
-
inline auto operator*(const T0 &s,
|
297 |
-
const TVector2<T1> &v0) -> TVector2<decltype(s * v0[0])> {
|
298 |
-
return TVector2<decltype(s * v0[0])>{s * v0[0], s * v0[1]};
|
299 |
-
}
|
300 |
-
|
301 |
-
template <typename T0, typename T1>
|
302 |
-
DEVICE
|
303 |
-
inline auto operator*=(TVector2<T0> &v0,
|
304 |
-
const T1 &s) -> TVector2<T0>& {
|
305 |
-
v0[0] *= s;
|
306 |
-
v0[1] *= s;
|
307 |
-
return v0;
|
308 |
-
}
|
309 |
-
|
310 |
-
template <typename T0, typename T1>
|
311 |
-
DEVICE
|
312 |
-
inline auto operator*(const TVector3<T0> &v0,
|
313 |
-
const T1 &s) -> TVector3<decltype(v0[0] * s)> {
|
314 |
-
return TVector3<decltype(v0[0] * s)>{
|
315 |
-
v0[0] * s, v0[1] * s, v0[2] * s};
|
316 |
-
}
|
317 |
-
|
318 |
-
template <typename T0, typename T1>
|
319 |
-
DEVICE
|
320 |
-
inline auto operator*(const T0 &s,
|
321 |
-
const TVector3<T1> &v0) -> TVector3<decltype(s * v0[0])> {
|
322 |
-
return TVector3<decltype(s * v0[0])>{
|
323 |
-
s * v0[0], s * v0[1], s * v0[2]};
|
324 |
-
}
|
325 |
-
|
326 |
-
template <typename T0, typename T1>
|
327 |
-
DEVICE
|
328 |
-
inline auto operator*=(TVector3<T0> &v0,
|
329 |
-
const T1 &s) -> TVector3<T0>& {
|
330 |
-
v0[0] *= s;
|
331 |
-
v0[1] *= s;
|
332 |
-
v0[2] *= s;
|
333 |
-
return v0;
|
334 |
-
}
|
335 |
-
|
336 |
-
template <typename T0, typename T1>
|
337 |
-
DEVICE
|
338 |
-
inline auto operator*=(TVector4<T0> &v0,
|
339 |
-
const T1 &s) -> TVector4<T0>& {
|
340 |
-
v0[0] *= s;
|
341 |
-
v0[1] *= s;
|
342 |
-
v0[2] *= s;
|
343 |
-
v0[3] *= s;
|
344 |
-
return v0;
|
345 |
-
}
|
346 |
-
|
347 |
-
template <typename T0, typename T1>
|
348 |
-
DEVICE
|
349 |
-
inline auto operator*(const TVector3<T0> &v0,
|
350 |
-
const TVector3<T1> &v1) -> TVector3<decltype(v0[0] * v1[0])> {
|
351 |
-
return TVector3<decltype(v0[0] * v1[0])>{
|
352 |
-
v0[0] * v1[0], v0[1] * v1[1], v0[2] * v1[2]};
|
353 |
-
}
|
354 |
-
|
355 |
-
template <typename T0, typename T1>
|
356 |
-
DEVICE
|
357 |
-
inline auto operator*(const TVector4<T0> &v0,
|
358 |
-
const T1 &s) -> TVector4<decltype(v0[0] * s)> {
|
359 |
-
return TVector4<decltype(v0[0] * s)>{
|
360 |
-
v0[0] * s, v0[1] * s, v0[2] * s, v0[3] * s};
|
361 |
-
}
|
362 |
-
|
363 |
-
template <typename T0, typename T1>
|
364 |
-
DEVICE
|
365 |
-
inline auto operator*(const T0 &s,
|
366 |
-
const TVector4<T1> &v0) -> TVector4<decltype(s * v0[0])> {
|
367 |
-
return TVector4<decltype(s * v0[0])>{
|
368 |
-
s * v0[0], s * v0[1], s * v0[2], s * v0[3]};
|
369 |
-
}
|
370 |
-
|
371 |
-
template <typename T0, typename T1>
|
372 |
-
DEVICE
|
373 |
-
inline auto operator*(const TVector4<T0> &v0,
|
374 |
-
const TVector4<T1> &v1) -> TVector4<decltype(v0[0] * v1[0])> {
|
375 |
-
return TVector4<decltype(v0[0] * v1[0])>{
|
376 |
-
v0[0] * v1[0], v0[1] * v1[1], v0[2] * v1[2], v0[3] * v1[3]};
|
377 |
-
}
|
378 |
-
|
379 |
-
template <typename T0, typename T1>
|
380 |
-
DEVICE
|
381 |
-
inline auto operator/(const TVector2<T0> &v0,
|
382 |
-
const T1 &s) -> TVector2<decltype(v0[0] / s)> {
|
383 |
-
auto inv_s = 1.f / s;
|
384 |
-
return v0 * inv_s;
|
385 |
-
}
|
386 |
-
|
387 |
-
template <typename T0, typename T1>
|
388 |
-
DEVICE
|
389 |
-
inline auto operator/(const TVector3<T0> &v0,
|
390 |
-
const T1 &s) -> TVector3<decltype(v0[0] / s)> {
|
391 |
-
auto inv_s = 1.f / s;
|
392 |
-
return v0 * inv_s;
|
393 |
-
}
|
394 |
-
|
395 |
-
template <typename T0, typename T1>
|
396 |
-
DEVICE
|
397 |
-
inline auto operator/(const TVector4<T0> &v0,
|
398 |
-
const T1 &s) -> TVector4<decltype(v0[0] / s)> {
|
399 |
-
auto inv_s = 1.f / s;
|
400 |
-
return v0 * inv_s;
|
401 |
-
}
|
402 |
-
|
403 |
-
template <typename T0, typename T1>
|
404 |
-
DEVICE
|
405 |
-
inline auto operator/(const T0 &s,
|
406 |
-
const TVector3<T1> &v1) -> TVector3<decltype(s / v1[0])> {
|
407 |
-
return TVector3<decltype(s / v1[0])>{
|
408 |
-
s / v1[0], s / v1[2], s / v1[2]};
|
409 |
-
}
|
410 |
-
|
411 |
-
template <typename T0, typename T1>
|
412 |
-
DEVICE
|
413 |
-
inline auto operator/(const TVector3<T0> &v0,
|
414 |
-
const TVector3<T1> &v1) -> TVector3<decltype(v0[0] / v1[0])> {
|
415 |
-
return TVector3<decltype(v0[0] / v1[0])>{
|
416 |
-
v0[0] / v1[0], v0[1] / v1[2], v0[2] / v1[2]};
|
417 |
-
}
|
418 |
-
|
419 |
-
template <typename T0, typename T1>
|
420 |
-
DEVICE
|
421 |
-
inline auto operator/(const TVector2<T0> &v0,
|
422 |
-
const TVector2<T1> &v1) -> TVector2<decltype(v0[0] / v1[0])> {
|
423 |
-
return TVector2<decltype(v0[0] / v1[0])>{
|
424 |
-
v0[0] / v1[0], v0[1] / v1[1]};
|
425 |
-
}
|
426 |
-
|
427 |
-
template <typename T0, typename T1>
|
428 |
-
DEVICE
|
429 |
-
inline auto operator/=(TVector3<T0> &v0,
|
430 |
-
const T1 &s) -> TVector3<T0>& {
|
431 |
-
auto inv_s = 1.f / s;
|
432 |
-
v0[0] *= inv_s;
|
433 |
-
v0[1] *= inv_s;
|
434 |
-
v0[2] *= inv_s;
|
435 |
-
return v0;
|
436 |
-
}
|
437 |
-
|
438 |
-
template <typename T0, typename T1>
|
439 |
-
DEVICE
|
440 |
-
inline auto operator/=(TVector4<T0> &v0,
|
441 |
-
const T1 &s) -> TVector4<T0>& {
|
442 |
-
auto inv_s = 1.f / s;
|
443 |
-
v0[0] *= inv_s;
|
444 |
-
v0[1] *= inv_s;
|
445 |
-
v0[2] *= inv_s;
|
446 |
-
v0[3] *= inv_s;
|
447 |
-
return v0;
|
448 |
-
}
|
449 |
-
|
450 |
-
template <typename T0, typename T1>
|
451 |
-
DEVICE
|
452 |
-
inline bool operator==(const TVector2<T0> &v0,
|
453 |
-
const TVector2<T1> &v1) {
|
454 |
-
return v0.x == v1.x && v0.y == v1.y;
|
455 |
-
}
|
456 |
-
|
457 |
-
template <typename T0, typename T1>
|
458 |
-
DEVICE
|
459 |
-
inline bool operator==(const TVector3<T0> &v0,
|
460 |
-
const TVector3<T1> &v1) {
|
461 |
-
return v0.x == v1.x && v0.y == v1.y && v0.z == v1.z;
|
462 |
-
}
|
463 |
-
|
464 |
-
template <typename T0, typename T1>
|
465 |
-
DEVICE
|
466 |
-
inline bool operator!=(const TVector3<T0> &v0,
|
467 |
-
const TVector3<T1> &v1) {
|
468 |
-
return v0.x != v1.x || v0.y != v1.y || v0.z != v1.z;
|
469 |
-
}
|
470 |
-
|
471 |
-
template <typename T>
|
472 |
-
DEVICE
|
473 |
-
inline TVector2<T> get_normal(const TVector2<T> &v) {
|
474 |
-
return TVector2<T>{v.y, -v.x};
|
475 |
-
}
|
476 |
-
|
477 |
-
template <typename T>
|
478 |
-
DEVICE
|
479 |
-
inline T length_squared(const TVector2<T> &v0) {
|
480 |
-
return square(v0[0]) + square(v0[1]);
|
481 |
-
}
|
482 |
-
|
483 |
-
template <typename T>
|
484 |
-
DEVICE
|
485 |
-
inline TVector2<T> d_length_squared(const TVector2<T> &v0, const T &d_l_sq) {
|
486 |
-
//l_sq = square(v0[0]) + square(v0[1])
|
487 |
-
return 2 * d_l_sq * v0;
|
488 |
-
}
|
489 |
-
|
490 |
-
template <typename T>
|
491 |
-
DEVICE
|
492 |
-
inline T length(const TVector2<T> &v0) {
|
493 |
-
return sqrt(length_squared(v0));
|
494 |
-
}
|
495 |
-
|
496 |
-
template <typename T>
|
497 |
-
DEVICE
|
498 |
-
inline TVector2<T> d_length(const TVector2<T> &v0, const T &d_l) {
|
499 |
-
auto l_sq = length_squared(v0);
|
500 |
-
auto l = sqrt(l_sq);
|
501 |
-
auto d_l_sq = 0.5f * d_l / l;
|
502 |
-
return d_length_squared(v0, T(d_l_sq));
|
503 |
-
}
|
504 |
-
|
505 |
-
template <typename T>
|
506 |
-
DEVICE
|
507 |
-
inline T length_squared(const TVector3<T> &v0) {
|
508 |
-
return square(v0[0]) + square(v0[1]) + square(v0[2]);
|
509 |
-
}
|
510 |
-
|
511 |
-
template <typename T>
|
512 |
-
DEVICE
|
513 |
-
inline TVector3<T> d_length_squared(const TVector3<T> &v0, const T &d_l_sq) {
|
514 |
-
//l_sq = square(v0[0]) + square(v0[1]) + square(v0[2])
|
515 |
-
return 2 * d_l_sq * v0;
|
516 |
-
}
|
517 |
-
|
518 |
-
template <typename T>
|
519 |
-
DEVICE
|
520 |
-
inline T length(const TVector3<T> &v0) {
|
521 |
-
return sqrt(length_squared(v0));
|
522 |
-
}
|
523 |
-
|
524 |
-
template <typename T>
|
525 |
-
DEVICE
|
526 |
-
inline TVector3<T> d_length(const TVector3<T> &v0, const T &d_l) {
|
527 |
-
auto l_sq = length_squared(v0);
|
528 |
-
auto l = sqrt(l_sq);
|
529 |
-
auto d_l_sq = 0.5f * d_l / l;
|
530 |
-
return d_length_squared(v0, d_l_sq);
|
531 |
-
}
|
532 |
-
|
533 |
-
template <typename T0, typename T1>
|
534 |
-
DEVICE
|
535 |
-
inline auto distance_squared(const TVector2<T0> &v0,
|
536 |
-
const TVector2<T1> &v1) -> decltype(length_squared(v1 - v0)) {
|
537 |
-
return length_squared(v1 - v0);
|
538 |
-
}
|
539 |
-
|
540 |
-
template <typename T0, typename T1>
|
541 |
-
DEVICE
|
542 |
-
inline auto distance_squared(const TVector3<T0> &v0,
|
543 |
-
const TVector3<T1> &v1) -> decltype(length_squared(v1 - v0)) {
|
544 |
-
return length_squared(v1 - v0);
|
545 |
-
}
|
546 |
-
|
547 |
-
template <typename T0, typename T1>
|
548 |
-
DEVICE
|
549 |
-
inline auto distance(const TVector2<T0> &v0,
|
550 |
-
const TVector2<T1> &v1) -> decltype(length(v1 - v0)) {
|
551 |
-
return length(v1 - v0);
|
552 |
-
}
|
553 |
-
|
554 |
-
template <typename T>
|
555 |
-
DEVICE
|
556 |
-
inline void d_distance(const TVector2<T> &v0,
|
557 |
-
const TVector2<T> &v1,
|
558 |
-
const T &d_output,
|
559 |
-
TVector2<T> &d_v0,
|
560 |
-
TVector2<T> &d_v1) {
|
561 |
-
auto d_v1_v0 = d_length(v1 - v0, d_output);
|
562 |
-
d_v0 -= d_v1_v0;
|
563 |
-
d_v1 += d_v1_v0;
|
564 |
-
}
|
565 |
-
|
566 |
-
template <typename T0, typename T1>
|
567 |
-
DEVICE
|
568 |
-
inline auto distance(const TVector3<T0> &v0,
|
569 |
-
const TVector3<T1> &v1) -> decltype(length(v1 - v0)) {
|
570 |
-
return length(v1 - v0);
|
571 |
-
}
|
572 |
-
|
573 |
-
template <typename T>
|
574 |
-
DEVICE
|
575 |
-
inline void d_distance(const TVector3<T> &v0,
|
576 |
-
const TVector3<T> &v1,
|
577 |
-
const T &d_output,
|
578 |
-
TVector3<T> &d_v0,
|
579 |
-
TVector3<T> &d_v1) {
|
580 |
-
auto d_v1_v0 = d_length(v1 - v0, d_output);
|
581 |
-
d_v0 -= d_v1_v0;
|
582 |
-
d_v1 += d_v1_v0;
|
583 |
-
}
|
584 |
-
|
585 |
-
template <typename T>
|
586 |
-
DEVICE
|
587 |
-
inline TVector2<T> normalize(const TVector2<T> &v0) {
|
588 |
-
return v0 / length(v0);
|
589 |
-
}
|
590 |
-
|
591 |
-
template <typename T>
|
592 |
-
DEVICE
|
593 |
-
inline TVector2<T> d_normalize(const TVector2<T> &v0, const TVector2<T> &d_n) {
|
594 |
-
auto l = length(v0);
|
595 |
-
auto n = v0 / l;
|
596 |
-
auto d_v0 = d_n / l;
|
597 |
-
auto d_l = -dot(d_n, n) / l;
|
598 |
-
// l = length(v0)
|
599 |
-
d_v0 += d_length(v0, d_l);
|
600 |
-
return d_v0;
|
601 |
-
}
|
602 |
-
|
603 |
-
template <typename T>
|
604 |
-
DEVICE
|
605 |
-
inline TVector3<T> normalize(const TVector3<T> &v0) {
|
606 |
-
return v0 / length(v0);
|
607 |
-
}
|
608 |
-
|
609 |
-
template <typename T>
|
610 |
-
DEVICE
|
611 |
-
inline TVector3<T> d_normalize(const TVector3<T> &v0, const TVector3<T> &d_n) {
|
612 |
-
auto l = length(v0);
|
613 |
-
auto n = v0 / l;
|
614 |
-
auto d_v0 = d_n / l;
|
615 |
-
auto d_l = -dot(d_n, n) / l;
|
616 |
-
// l = length(v0)
|
617 |
-
d_v0 += d_length(v0, d_l);
|
618 |
-
return d_v0;
|
619 |
-
}
|
620 |
-
|
621 |
-
template <typename T0, typename T1>
|
622 |
-
DEVICE
|
623 |
-
inline auto dot(const TVector2<T0> &v0, const TVector2<T1> &v1) -> decltype(v0[0] * v1[0]) {
|
624 |
-
return v0[0] * v1[0] +
|
625 |
-
v0[1] * v1[1];
|
626 |
-
}
|
627 |
-
|
628 |
-
template <typename T0, typename T1>
|
629 |
-
DEVICE
|
630 |
-
inline auto dot(const TVector3<T0> &v0, const TVector3<T1> &v1) -> decltype(v0[0] * v1[0]) {
|
631 |
-
return v0[0] * v1[0] +
|
632 |
-
v0[1] * v1[1] +
|
633 |
-
v0[2] * v1[2];
|
634 |
-
}
|
635 |
-
|
636 |
-
template <typename T0, typename T1>
|
637 |
-
DEVICE
|
638 |
-
inline auto dot(const TVector4<T0> &v0, const TVector4<T1> &v1) -> decltype(v0[0] * v1[0]) {
|
639 |
-
return v0[0] * v1[0] +
|
640 |
-
v0[1] * v1[1] +
|
641 |
-
v0[2] * v1[2] +
|
642 |
-
v0[3] * v1[3];
|
643 |
-
}
|
644 |
-
|
645 |
-
template <typename T0, typename T1>
|
646 |
-
DEVICE
|
647 |
-
inline auto cross(const TVector3<T0> &v0, const TVector3<T1> &v1) -> TVector3<decltype(v0[1] * v1[2] - v0[2] * v1[1])> {
|
648 |
-
return TVector3<decltype(v0[1] * v1[2] - v0[2] * v1[1])>{
|
649 |
-
v0[1] * v1[2] - v0[2] * v1[1],
|
650 |
-
v0[2] * v1[0] - v0[0] * v1[2],
|
651 |
-
v0[0] * v1[1] - v0[1] * v1[0]};
|
652 |
-
}
|
653 |
-
|
654 |
-
template <typename T>
|
655 |
-
DEVICE
|
656 |
-
inline void d_cross(const TVector3<T> &v0, const TVector3<T> &v1, const TVector3<T> &d_output,
|
657 |
-
TVector3<T> &d_v0, TVector3<T> &d_v1) {
|
658 |
-
d_v0 += cross(v1, d_output);
|
659 |
-
d_v1 += cross(d_output, v0);
|
660 |
-
}
|
661 |
-
|
662 |
-
template <typename T>
|
663 |
-
DEVICE
|
664 |
-
inline T luminance(const TVector3<T> &v) {
|
665 |
-
return 0.212671f * v[0] +
|
666 |
-
0.715160f * v[1] +
|
667 |
-
0.072169f * v[2];
|
668 |
-
}
|
669 |
-
|
670 |
-
template <typename T>
|
671 |
-
DEVICE
|
672 |
-
inline T sum(const T &v) {
|
673 |
-
return v;
|
674 |
-
}
|
675 |
-
|
676 |
-
template <typename T>
|
677 |
-
DEVICE
|
678 |
-
inline T sum(const TVector2<T> &v) {
|
679 |
-
return v[0] + v[1];
|
680 |
-
}
|
681 |
-
|
682 |
-
template <typename T>
|
683 |
-
DEVICE
|
684 |
-
inline T sum(const TVector3<T> &v) {
|
685 |
-
return v[0] + v[1] + v[2];
|
686 |
-
}
|
687 |
-
|
688 |
-
template <typename T>
|
689 |
-
DEVICE
|
690 |
-
inline T sum(const TVector4<T> &v) {
|
691 |
-
return v[0] + v[1] + v[2] + v[3];
|
692 |
-
}
|
693 |
-
|
694 |
-
template <typename T>
|
695 |
-
DEVICE
|
696 |
-
void coordinate_system(const TVector3<T> &n, TVector3<T> &x, TVector3<T> &y) {
|
697 |
-
if (n[2] < -1.f + 1e-6f) {
|
698 |
-
x = TVector3<T>{T(0), T(-1), T(0)};
|
699 |
-
y = TVector3<T>{T(-1), T(0), T(0)};
|
700 |
-
} else {
|
701 |
-
auto a = 1.f / (1.f + n[2]);
|
702 |
-
auto b = -n[0] * n[1] * a;
|
703 |
-
x = TVector3<T>{1.f - square(n[0]) * a, b, -n[0]};
|
704 |
-
y = TVector3<T>{b, 1.f - square(n[1]) * a, -n[1]};
|
705 |
-
}
|
706 |
-
}
|
707 |
-
|
708 |
-
template <typename T>
|
709 |
-
DEVICE
|
710 |
-
void d_coordinate_system(const TVector3<T> &n, const TVector3<T> &d_x, const TVector3<T> &d_y,
|
711 |
-
TVector3<T> &d_n) {
|
712 |
-
if (n[2] < -1.f + 1e-6f) {
|
713 |
-
//x = TVector3<T>{T(0), T(-1), T(0)};
|
714 |
-
//y = TVector3<T>{T(-1), T(0), T(0)};
|
715 |
-
// don't need to do anything
|
716 |
-
} else {
|
717 |
-
auto a = 1.f / (1.f + n[2]);
|
718 |
-
// auto b = -n[0] * n[1] * a;
|
719 |
-
// x = TVector3<T>{1.f - square(n[0]) * a, b, -n[0]}
|
720 |
-
d_n[0] -= 2.f * n[0] * d_x[0] * a;
|
721 |
-
auto d_a = -square(n[0]) * d_x[0];
|
722 |
-
auto d_b = d_x[1];
|
723 |
-
d_n[0] -= d_x[2];
|
724 |
-
// y = TVector3<T>{b, 1.f - square(n[1]) * a, -n[1]}
|
725 |
-
d_b += d_y[0];
|
726 |
-
d_n[1] -= 2.f * d_y[1] * n[1] * a;
|
727 |
-
d_a -= d_y[1] * square(n[1]);
|
728 |
-
d_n[1] -= d_y[2];
|
729 |
-
// b = -n[0] * n[1] * a
|
730 |
-
d_n[0] -= d_b * n[1] * a;
|
731 |
-
d_n[1] -= d_b * n[0] * a;
|
732 |
-
d_a -= d_b * n[0] * n[1];
|
733 |
-
// a = 1 / (1 + n[2])
|
734 |
-
d_n[2] -= d_a * a / (1 + n[2]);
|
735 |
-
}
|
736 |
-
}
|
737 |
-
|
738 |
-
DEVICE
|
739 |
-
inline bool isfinite(const Vector2 &v) {
|
740 |
-
return isfinite(v.x) &&
|
741 |
-
isfinite(v.y);
|
742 |
-
}
|
743 |
-
|
744 |
-
DEVICE
|
745 |
-
inline bool isfinite(const Vector3 &v) {
|
746 |
-
return isfinite(v.x) &&
|
747 |
-
isfinite(v.y) &&
|
748 |
-
isfinite(v.z);
|
749 |
-
}
|
750 |
-
|
751 |
-
DEVICE
|
752 |
-
inline bool isfinite(const Vector4 &v) {
|
753 |
-
return isfinite(v.x) &&
|
754 |
-
isfinite(v.y) &&
|
755 |
-
isfinite(v.z) &&
|
756 |
-
isfinite(v.w);
|
757 |
-
}
|
758 |
-
|
759 |
-
DEVICE
|
760 |
-
inline bool is_zero(const Vector3 &v) {
|
761 |
-
return v.x == 0 && v.y == 0 && v.z == 0;
|
762 |
-
}
|
763 |
-
|
764 |
-
template <typename T>
|
765 |
-
inline std::ostream& operator<<(std::ostream &os, const TVector2<T> &v) {
|
766 |
-
return os << "(" << v[0] << ", " << v[1] << ")";
|
767 |
-
}
|
768 |
-
|
769 |
-
template <typename T>
|
770 |
-
inline std::ostream& operator<<(std::ostream &os, const TVector3<T> &v) {
|
771 |
-
return os << "(" << v[0] << ", " << v[1] << ", " << v[2] << ")";
|
772 |
-
}
|
773 |
-
|
774 |
-
template <typename T>
|
775 |
-
inline std::ostream& operator<<(std::ostream &os, const TVector4<T> &v) {
|
776 |
-
return os << "(" << v[0] << ", " << v[1] << ", " << v[2] << ", " << v[3] << ")";
|
777 |
-
}
|
778 |
-
|
779 |
-
DEVICE
|
780 |
-
inline
|
781 |
-
float det(const Vector2f &a, const Vector2f &b) {
|
782 |
-
return a.x*b.y-b.x*a.y;
|
783 |
-
}
|
784 |
-
|
785 |
-
DEVICE
|
786 |
-
inline
|
787 |
-
Vector2f quadratic_closest_pt_approx(const Vector2f &b0,
|
788 |
-
const Vector2f &b1,
|
789 |
-
const Vector2f &b2,
|
790 |
-
float *t_ = nullptr) {
|
791 |
-
// From http://w3.impa.br/~diego/publications/NehHop08.pdf
|
792 |
-
float a=det(b0,b2), b=2*det(b1,b0), d=2*det(b2,b1);
|
793 |
-
float f=b*d-a*a;
|
794 |
-
Vector2f d21=b2-b1, d10=b1-b0, d20=b2-b0;
|
795 |
-
Vector2f gf=2*(b*d21+d*d10+a*d20);
|
796 |
-
gf=Vector2f(gf.y,-gf.x);
|
797 |
-
Vector2f pp=-f*gf/dot(gf,gf);
|
798 |
-
Vector2f d0p=b0-pp;
|
799 |
-
float ap=det(d0p,d20), bp=2*det(d10,d0p);
|
800 |
-
float t=clamp((ap+bp)/(2*a+b+d),0.f,1.f);
|
801 |
-
float tt = 1 - t;
|
802 |
-
if (t_ != nullptr) {
|
803 |
-
*t_ = t;
|
804 |
-
}
|
805 |
-
return (tt*tt)*b0 + (2*tt*t)*b1 + (t*t)*b2;
|
806 |
-
}
|
807 |
-
|
808 |
-
DEVICE
|
809 |
-
inline
|
810 |
-
Vector2f quadratic_closest_pt_approx(const Vector2f &b0,
|
811 |
-
const Vector2f &b1,
|
812 |
-
const Vector2f &b2,
|
813 |
-
const Vector2f &pt,
|
814 |
-
float *t = nullptr) {
|
815 |
-
// Approximate closest point to a quadratic curve
|
816 |
-
return quadratic_closest_pt_approx(b0 - pt, b1 - pt, b2 - pt, t) + pt;
|
817 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/datasets/dataset_wrappers.py
DELETED
@@ -1,282 +0,0 @@
|
|
1 |
-
import bisect
|
2 |
-
import math
|
3 |
-
from collections import defaultdict
|
4 |
-
|
5 |
-
import numpy as np
|
6 |
-
from mmcv.utils import print_log
|
7 |
-
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
|
8 |
-
|
9 |
-
from .builder import DATASETS
|
10 |
-
from .coco import CocoDataset
|
11 |
-
|
12 |
-
|
13 |
-
@DATASETS.register_module()
|
14 |
-
class ConcatDataset(_ConcatDataset):
|
15 |
-
"""A wrapper of concatenated dataset.
|
16 |
-
|
17 |
-
Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
|
18 |
-
concat the group flag for image aspect ratio.
|
19 |
-
|
20 |
-
Args:
|
21 |
-
datasets (list[:obj:`Dataset`]): A list of datasets.
|
22 |
-
separate_eval (bool): Whether to evaluate the results
|
23 |
-
separately if it is used as validation dataset.
|
24 |
-
Defaults to True.
|
25 |
-
"""
|
26 |
-
|
27 |
-
def __init__(self, datasets, separate_eval=True):
|
28 |
-
super(ConcatDataset, self).__init__(datasets)
|
29 |
-
self.CLASSES = datasets[0].CLASSES
|
30 |
-
self.separate_eval = separate_eval
|
31 |
-
if not separate_eval:
|
32 |
-
if any([isinstance(ds, CocoDataset) for ds in datasets]):
|
33 |
-
raise NotImplementedError(
|
34 |
-
'Evaluating concatenated CocoDataset as a whole is not'
|
35 |
-
' supported! Please set "separate_eval=True"')
|
36 |
-
elif len(set([type(ds) for ds in datasets])) != 1:
|
37 |
-
raise NotImplementedError(
|
38 |
-
'All the datasets should have same types')
|
39 |
-
|
40 |
-
if hasattr(datasets[0], 'flag'):
|
41 |
-
flags = []
|
42 |
-
for i in range(0, len(datasets)):
|
43 |
-
flags.append(datasets[i].flag)
|
44 |
-
self.flag = np.concatenate(flags)
|
45 |
-
|
46 |
-
def get_cat_ids(self, idx):
|
47 |
-
"""Get category ids of concatenated dataset by index.
|
48 |
-
|
49 |
-
Args:
|
50 |
-
idx (int): Index of data.
|
51 |
-
|
52 |
-
Returns:
|
53 |
-
list[int]: All categories in the image of specified index.
|
54 |
-
"""
|
55 |
-
|
56 |
-
if idx < 0:
|
57 |
-
if -idx > len(self):
|
58 |
-
raise ValueError(
|
59 |
-
'absolute value of index should not exceed dataset length')
|
60 |
-
idx = len(self) + idx
|
61 |
-
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
|
62 |
-
if dataset_idx == 0:
|
63 |
-
sample_idx = idx
|
64 |
-
else:
|
65 |
-
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
|
66 |
-
return self.datasets[dataset_idx].get_cat_ids(sample_idx)
|
67 |
-
|
68 |
-
def evaluate(self, results, logger=None, **kwargs):
|
69 |
-
"""Evaluate the results.
|
70 |
-
|
71 |
-
Args:
|
72 |
-
results (list[list | tuple]): Testing results of the dataset.
|
73 |
-
logger (logging.Logger | str | None): Logger used for printing
|
74 |
-
related information during evaluation. Default: None.
|
75 |
-
|
76 |
-
Returns:
|
77 |
-
dict[str: float]: AP results of the total dataset or each separate
|
78 |
-
dataset if `self.separate_eval=True`.
|
79 |
-
"""
|
80 |
-
assert len(results) == self.cumulative_sizes[-1], \
|
81 |
-
('Dataset and results have different sizes: '
|
82 |
-
f'{self.cumulative_sizes[-1]} v.s. {len(results)}')
|
83 |
-
|
84 |
-
# Check whether all the datasets support evaluation
|
85 |
-
for dataset in self.datasets:
|
86 |
-
assert hasattr(dataset, 'evaluate'), \
|
87 |
-
f'{type(dataset)} does not implement evaluate function'
|
88 |
-
|
89 |
-
if self.separate_eval:
|
90 |
-
dataset_idx = -1
|
91 |
-
total_eval_results = dict()
|
92 |
-
for size, dataset in zip(self.cumulative_sizes, self.datasets):
|
93 |
-
start_idx = 0 if dataset_idx == -1 else \
|
94 |
-
self.cumulative_sizes[dataset_idx]
|
95 |
-
end_idx = self.cumulative_sizes[dataset_idx + 1]
|
96 |
-
|
97 |
-
results_per_dataset = results[start_idx:end_idx]
|
98 |
-
print_log(
|
99 |
-
f'\nEvaluateing {dataset.ann_file} with '
|
100 |
-
f'{len(results_per_dataset)} images now',
|
101 |
-
logger=logger)
|
102 |
-
|
103 |
-
eval_results_per_dataset = dataset.evaluate(
|
104 |
-
results_per_dataset, logger=logger, **kwargs)
|
105 |
-
dataset_idx += 1
|
106 |
-
for k, v in eval_results_per_dataset.items():
|
107 |
-
total_eval_results.update({f'{dataset_idx}_{k}': v})
|
108 |
-
|
109 |
-
return total_eval_results
|
110 |
-
elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
|
111 |
-
raise NotImplementedError(
|
112 |
-
'Evaluating concatenated CocoDataset as a whole is not'
|
113 |
-
' supported! Please set "separate_eval=True"')
|
114 |
-
elif len(set([type(ds) for ds in self.datasets])) != 1:
|
115 |
-
raise NotImplementedError(
|
116 |
-
'All the datasets should have same types')
|
117 |
-
else:
|
118 |
-
original_data_infos = self.datasets[0].data_infos
|
119 |
-
self.datasets[0].data_infos = sum(
|
120 |
-
[dataset.data_infos for dataset in self.datasets], [])
|
121 |
-
eval_results = self.datasets[0].evaluate(
|
122 |
-
results, logger=logger, **kwargs)
|
123 |
-
self.datasets[0].data_infos = original_data_infos
|
124 |
-
return eval_results
|
125 |
-
|
126 |
-
|
127 |
-
@DATASETS.register_module()
|
128 |
-
class RepeatDataset(object):
|
129 |
-
"""A wrapper of repeated dataset.
|
130 |
-
|
131 |
-
The length of repeated dataset will be `times` larger than the original
|
132 |
-
dataset. This is useful when the data loading time is long but the dataset
|
133 |
-
is small. Using RepeatDataset can reduce the data loading time between
|
134 |
-
epochs.
|
135 |
-
|
136 |
-
Args:
|
137 |
-
dataset (:obj:`Dataset`): The dataset to be repeated.
|
138 |
-
times (int): Repeat times.
|
139 |
-
"""
|
140 |
-
|
141 |
-
def __init__(self, dataset, times):
|
142 |
-
self.dataset = dataset
|
143 |
-
self.times = times
|
144 |
-
self.CLASSES = dataset.CLASSES
|
145 |
-
if hasattr(self.dataset, 'flag'):
|
146 |
-
self.flag = np.tile(self.dataset.flag, times)
|
147 |
-
|
148 |
-
self._ori_len = len(self.dataset)
|
149 |
-
|
150 |
-
def __getitem__(self, idx):
|
151 |
-
return self.dataset[idx % self._ori_len]
|
152 |
-
|
153 |
-
def get_cat_ids(self, idx):
|
154 |
-
"""Get category ids of repeat dataset by index.
|
155 |
-
|
156 |
-
Args:
|
157 |
-
idx (int): Index of data.
|
158 |
-
|
159 |
-
Returns:
|
160 |
-
list[int]: All categories in the image of specified index.
|
161 |
-
"""
|
162 |
-
|
163 |
-
return self.dataset.get_cat_ids(idx % self._ori_len)
|
164 |
-
|
165 |
-
def __len__(self):
|
166 |
-
"""Length after repetition."""
|
167 |
-
return self.times * self._ori_len
|
168 |
-
|
169 |
-
|
170 |
-
# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa
|
171 |
-
@DATASETS.register_module()
|
172 |
-
class ClassBalancedDataset(object):
|
173 |
-
"""A wrapper of repeated dataset with repeat factor.
|
174 |
-
|
175 |
-
Suitable for training on class imbalanced datasets like LVIS. Following
|
176 |
-
the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
|
177 |
-
in each epoch, an image may appear multiple times based on its
|
178 |
-
"repeat factor".
|
179 |
-
The repeat factor for an image is a function of the frequency the rarest
|
180 |
-
category labeled in that image. The "frequency of category c" in [0, 1]
|
181 |
-
is defined by the fraction of images in the training set (without repeats)
|
182 |
-
in which category c appears.
|
183 |
-
The dataset needs to instantiate :func:`self.get_cat_ids` to support
|
184 |
-
ClassBalancedDataset.
|
185 |
-
|
186 |
-
The repeat factor is computed as followed.
|
187 |
-
|
188 |
-
1. For each category c, compute the fraction # of images
|
189 |
-
that contain it: :math:`f(c)`
|
190 |
-
2. For each category c, compute the category-level repeat factor:
|
191 |
-
:math:`r(c) = max(1, sqrt(t/f(c)))`
|
192 |
-
3. For each image I, compute the image-level repeat factor:
|
193 |
-
:math:`r(I) = max_{c in I} r(c)`
|
194 |
-
|
195 |
-
Args:
|
196 |
-
dataset (:obj:`CustomDataset`): The dataset to be repeated.
|
197 |
-
oversample_thr (float): frequency threshold below which data is
|
198 |
-
repeated. For categories with ``f_c >= oversample_thr``, there is
|
199 |
-
no oversampling. For categories with ``f_c < oversample_thr``, the
|
200 |
-
degree of oversampling following the square-root inverse frequency
|
201 |
-
heuristic above.
|
202 |
-
filter_empty_gt (bool, optional): If set true, images without bounding
|
203 |
-
boxes will not be oversampled. Otherwise, they will be categorized
|
204 |
-
as the pure background class and involved into the oversampling.
|
205 |
-
Default: True.
|
206 |
-
"""
|
207 |
-
|
208 |
-
def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
|
209 |
-
self.dataset = dataset
|
210 |
-
self.oversample_thr = oversample_thr
|
211 |
-
self.filter_empty_gt = filter_empty_gt
|
212 |
-
self.CLASSES = dataset.CLASSES
|
213 |
-
|
214 |
-
repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
|
215 |
-
repeat_indices = []
|
216 |
-
for dataset_idx, repeat_factor in enumerate(repeat_factors):
|
217 |
-
repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor))
|
218 |
-
self.repeat_indices = repeat_indices
|
219 |
-
|
220 |
-
flags = []
|
221 |
-
if hasattr(self.dataset, 'flag'):
|
222 |
-
for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
|
223 |
-
flags.extend([flag] * int(math.ceil(repeat_factor)))
|
224 |
-
assert len(flags) == len(repeat_indices)
|
225 |
-
self.flag = np.asarray(flags, dtype=np.uint8)
|
226 |
-
|
227 |
-
def _get_repeat_factors(self, dataset, repeat_thr):
    """Get the repeat factor for each image in the dataset.

    Args:
        dataset (:obj:`CustomDataset`): The dataset.
        repeat_thr (float): Frequency threshold. An image is repeated if
            it contains any category whose frequency is below it.

    Returns:
        list[float]: The repeat factor for each image in the dataset.
    """
    # 1. For each category c, compute the fraction of images that
    #    contain it: f(c)
    category_freq = defaultdict(int)
    num_images = len(dataset)
    for idx in range(num_images):
        # Use the ``dataset`` argument consistently; the original mixed it
        # with ``self.dataset`` (they refer to the same object here).
        cat_ids = set(dataset.get_cat_ids(idx))
        if len(cat_ids) == 0 and not self.filter_empty_gt:
            # Empty-GT images are grouped under a synthetic background id
            # one past the last real class index.
            cat_ids = {len(self.CLASSES)}
        for cat_id in cat_ids:
            category_freq[cat_id] += 1
    for k, v in category_freq.items():
        category_freq[k] = v / num_images

    # 2. For each category c, compute the category-level repeat factor:
    #    r(c) = max(1, sqrt(t / f(c)))
    category_repeat = {
        cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
        for cat_id, cat_freq in category_freq.items()
    }

    # 3. For each image I, compute the image-level repeat factor:
    #    r(I) = max_{c in I} r(c)
    repeat_factors = []
    for idx in range(num_images):
        cat_ids = set(dataset.get_cat_ids(idx))
        if len(cat_ids) == 0 and not self.filter_empty_gt:
            cat_ids = {len(self.CLASSES)}
        # Images with no categories (when filter_empty_gt is True) keep a
        # neutral factor of 1.0 and are never oversampled.
        repeat_factor = 1.0
        if cat_ids:
            repeat_factor = max(
                category_repeat[cat_id] for cat_id in cat_ids)
        repeat_factors.append(repeat_factor)

    return repeat_factors
def __getitem__(self, idx):
    """Return the sample behind the (possibly repeated) index ``idx``."""
    # Translate the repeated index back to the wrapped dataset's index.
    return self.dataset[self.repeat_indices[idx]]
def __len__(self):
    """Length after repetition."""
    # One entry per (image, repeat) pair built in __init__.
    return len(self.repeat_indices)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|